aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--Documentation/DocBook/filesystems.tmpl5
-rw-r--r--Documentation/hwmon/jc4221
-rw-r--r--Documentation/hwmon/k10temp8
-rw-r--r--Documentation/kernel-parameters.txt24
-rw-r--r--Documentation/networking/00-INDEX6
-rw-r--r--Documentation/networking/dns_resolver.txt9
-rw-r--r--MAINTAINERS17
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/kernel/irq.c13
-rw-r--r--arch/alpha/kernel/irq_alpha.c11
-rw-r--r--arch/alpha/kernel/irq_i8259.c18
-rw-r--r--arch/alpha/kernel/irq_impl.h8
-rw-r--r--arch/alpha/kernel/irq_pyxis.c20
-rw-r--r--arch/alpha/kernel/irq_srm.c16
-rw-r--r--arch/alpha/kernel/sys_alcor.c28
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c16
-rw-r--r--arch/alpha/kernel/sys_dp264.c52
-rw-r--r--arch/alpha/kernel/sys_eb64p.c18
-rw-r--r--arch/alpha/kernel/sys_eiger.c14
-rw-r--r--arch/alpha/kernel/sys_jensen.c24
-rw-r--r--arch/alpha/kernel/sys_marvel.c42
-rw-r--r--arch/alpha/kernel/sys_mikasa.c16
-rw-r--r--arch/alpha/kernel/sys_noritake.c16
-rw-r--r--arch/alpha/kernel/sys_rawhide.c17
-rw-r--r--arch/alpha/kernel/sys_rx164.c16
-rw-r--r--arch/alpha/kernel/sys_sable.c20
-rw-r--r--arch/alpha/kernel/sys_takara.c14
-rw-r--r--arch/alpha/kernel/sys_titan.c21
-rw-r--r--arch/alpha/kernel/sys_wildfire.c32
-rw-r--r--arch/arm/Kconfig25
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/.gitignore6
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h1
-rw-r--r--arch/arm/include/asm/hardware/sp810.h3
-rw-r--r--arch/arm/include/asm/tlb.h105
-rw-r--r--arch/arm/include/asm/tlbflush.h7
-rw-r--r--arch/arm/kernel/kprobes-decode.c2
-rw-r--r--arch/arm/kernel/pmu.c22
-rw-r--r--arch/arm/kernel/setup.c4
-rw-r--r--arch/arm/kernel/signal.c4
-rw-r--r--arch/arm/kernel/vmlinux.lds.S11
-rw-r--r--arch/arm/mach-omap2/clkt_dpll.c2
-rw-r--r--arch/arm/mach-omap2/mailbox.c2
-rw-r--r--arch/arm/mach-omap2/mux.c2
-rw-r--r--arch/arm/mach-omap2/pm-debug.c8
-rw-r--r--arch/arm/mach-omap2/prcm_mpu44xx.h4
-rw-r--r--arch/arm/mach-omap2/smartreflex.c4
-rw-r--r--arch/arm/mach-omap2/timer-gp.c13
-rw-r--r--arch/arm/mach-s5p6442/include/mach/map.h69
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/map.h83
-rw-r--r--arch/arm/mach-s5pc100/include/mach/map.h193
-rw-r--r--arch/arm/mach-s5pv210/include/mach/map.h168
-rw-r--r--arch/arm/mach-s5pv210/mach-aquila.c15
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c15
-rw-r--r--arch/arm/mach-s5pv310/include/mach/map.h149
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear320.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/kbc.h1
-rw-r--r--arch/arm/mm/cache-l2x0.c6
-rw-r--r--arch/arm/mm/proc-v7.S6
-rw-r--r--arch/arm/plat-omap/mailbox.c11
-rw-r--r--arch/arm/plat-s5p/dev-uart.c12
-rw-r--r--arch/arm/plat-samsung/dev-ts.c1
-rw-r--r--arch/arm/plat-spear/include/plat/uncompress.h4
-rw-r--r--arch/arm/plat-spear/include/plat/vmalloc.h2
-rw-r--r--arch/cris/kernel/vmlinux.lds.S5
-rw-r--r--arch/powerpc/include/asm/machdep.h6
-rw-r--r--arch/powerpc/kernel/machine_kexec.c5
-rw-r--r--arch/powerpc/kernel/process.c8
-rw-r--r--arch/powerpc/mm/tlb_hash64.c6
-rw-r--r--arch/s390/boot/compressed/misc.c5
-rw-r--r--arch/s390/include/asm/atomic.h26
-rw-r--r--arch/s390/include/asm/cache.h1
-rw-r--r--arch/x86/include/asm/acpi.h1
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/perf_event_p4.h1
-rw-r--r--arch/x86/include/asm/smpboot_hooks.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c14
-rw-r--r--arch/x86/kernel/apb_timer.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/p4-clockmod.c6
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c11
-rw-r--r--arch/x86/kernel/early-quirks.c16
-rw-r--r--arch/x86/kernel/reboot.c8
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c3
-rw-r--r--block/blk-core.c18
-rw-r--r--block/blk-flush.c8
-rw-r--r--block/blk-lib.c2
-rw-r--r--block/blk-throttle.c29
-rw-r--r--block/cfq-iosched.c6
-rw-r--r--block/elevator.c4
-rw-r--r--block/genhd.c2
-rw-r--r--block/ioctl.c8
-rw-r--r--drivers/acpi/acpica/aclocal.h7
-rw-r--r--drivers/acpi/acpica/evgpe.c17
-rw-r--r--drivers/acpi/acpica/evxfgpe.c42
-rw-r--r--drivers/acpi/debugfs.c20
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/char/agp/amd64-agp.c9
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/agp/intel-gtt.c56
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c52
-rw-r--r--drivers/char/tpm/tpm.c28
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm_tis.c4
-rw-r--r--drivers/cpufreq/cpufreq.c27
-rw-r--r--drivers/gpu/drm/drm_irq.c29
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c16
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c95
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c5
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c5
-rw-r--r--drivers/hwmon/Kconfig19
-rw-r--r--drivers/hwmon/ad7414.c1
-rw-r--r--drivers/hwmon/adt7411.c1
-rw-r--r--drivers/hwmon/jc42.c35
-rw-r--r--drivers/hwmon/k10temp.c5
-rw-r--r--drivers/hwmon/lm85.c23
-rw-r--r--drivers/i2c/busses/i2c-omap.c35
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/idle/intel_idle.c24
-rw-r--r--drivers/infiniband/core/addr.c7
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c3
-rw-r--r--drivers/input/gameport/gameport.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c62
-rw-r--r--drivers/input/mouse/synaptics.h23
-rw-r--r--drivers/input/serio/serio.c2
-rw-r--r--drivers/isdn/hardware/eicon/istream.c2
-rw-r--r--drivers/md/dm-log-userspace-transfer.c2
-rw-r--r--drivers/md/linear.c1
-rw-r--r--drivers/md/md.c31
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c1
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c1
-rw-r--r--drivers/mfd/asic3.c4
-rw-r--r--drivers/mfd/davinci_voicecodec.c4
-rw-r--r--drivers/mfd/tps6586x.c10
-rw-r--r--drivers/mfd/ucb1x00-ts.c12
-rw-r--r--drivers/mfd/wm8994-core.c18
-rw-r--r--drivers/net/Kconfig40
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/benet/be.h4
-rw-r--r--drivers/net/benet/be_cmds.c8
-rw-r--r--drivers/net/benet/be_cmds.h5
-rw-r--r--drivers/net/benet/be_hw.h12
-rw-r--r--drivers/net/benet/be_main.c136
-rw-r--r--drivers/net/bnx2x/bnx2x.h28
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c65
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h20
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c137
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h5
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c25
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c25
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c4
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c22
-rw-r--r--drivers/net/bonding/bonding.h5
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/cnic.c54
-rw-r--r--drivers/net/davinci_emac.c2
-rw-r--r--drivers/net/dm9000.c16
-rw-r--r--drivers/net/dnet.c3
-rw-r--r--drivers/net/e1000/e1000_osdep.h3
-rw-r--r--drivers/net/e1000e/ethtool.c6
-rw-r--r--drivers/net/e1000e/netdev.c3
-rw-r--r--drivers/net/enic/enic.h2
-rw-r--r--drivers/net/enic/enic_main.c2
-rw-r--r--drivers/net/eql.c10
-rw-r--r--drivers/net/fec.c3
-rw-r--r--drivers/net/ftmac100.c1196
-rw-r--r--drivers/net/ftmac100.h180
-rw-r--r--drivers/net/igb/e1000_defines.h1
-rw-r--r--drivers/net/igb/e1000_hw.h4
-rw-r--r--drivers/net/igb/e1000_regs.h7
-rw-r--r--drivers/net/igb/igb_ethtool.c22
-rw-r--r--drivers/net/igb/igb_main.c16
-rw-r--r--drivers/net/igbvf/ethtool.c6
-rw-r--r--drivers/net/igbvf/netdev.c3
-rw-r--r--drivers/net/igbvf/vf.c2
-rw-r--r--drivers/net/ipg.c4
-rw-r--r--drivers/net/ixgbe/ixgbe.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c101
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c228
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c947
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c43
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c88
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c16
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c29
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c259
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h50
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c43
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c11
-rw-r--r--drivers/net/jme.c2
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/mii.c14
-rw-r--r--drivers/net/mv643xx_eth.c74
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/pptp.c8
-rw-r--r--drivers/net/qla3xxx.c10
-rw-r--r--drivers/net/r8169.c320
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sfc/ethtool.c22
-rw-r--r--drivers/net/sis900.c4
-rw-r--r--drivers/net/skge.c3
-rw-r--r--drivers/net/tlan.c309
-rw-r--r--drivers/net/tun.c83
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/vxge/vxge-main.c18
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile5
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/at76c50x-usb.c10
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c166
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h77
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c452
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/b43/main.c6
-rw-r--r--drivers/net/wireless/b43/phy_n.c28
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c1106
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h27
-rw-r--r--drivers/net/wireless/b43/xmit.c75
-rw-r--r--drivers/net/wireless/b43/xmit.h6
-rw-r--r--drivers/net/wireless/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig116
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile25
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c)11
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h)4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-fh.h)5
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-hw.h)9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.c)4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.h)2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-rs.c)41
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945.c)257
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945.h)12
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.c967
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-legacy.h)30
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-hw.h)26
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c74
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h (renamed from drivers/net/wireless/rtlwifi/rtl8192cu/fw.c)17
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1260
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2870
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)172
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1369
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965.c)806
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-commands.h3405
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2674
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h646
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-csr.h422
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1467
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1426
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c45
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h270
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c561
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h181
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c188
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-prph.h523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c302
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c625
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h92
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c816
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c660
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c (renamed from drivers/net/wireless/iwlwifi/iwl3945-base.c)543
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3632
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig124
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c81
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c112
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h108
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.c657
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c392
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c78
-rw-r--r--drivers/net/wireless/libertas_tf/main.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwl8k.c54
-rw-r--r--drivers/net/wireless/orinoco/scan.c5
-rw-r--r--drivers/net/wireless/p54/eeprom.c7
-rw-r--r--drivers/net/wireless/p54/fwio.c9
-rw-r--r--drivers/net/wireless/p54/lmac.h2
-rw-r--r--drivers/net/wireless/p54/main.c14
-rw-r--r--drivers/net/wireless/p54/p54.h1
-rw-r--r--drivers/net/wireless/p54/p54pci.c14
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c13
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c8
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig7
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile8
-rw-r--r--drivers/net/wireless/rtlwifi/base.c15
-rw-r--r--drivers/net/wireless/rtlwifi/core.c9
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c18
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h3
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/Makefile9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h204
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c (renamed from drivers/net/wireless/rtlwifi/rtl8192ce/fw.c)13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h (renamed from drivers/net/wireless/rtlwifi/rtl8192ce/fw.h)0
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/main.c (renamed from drivers/net/wireless/rtlwifi/rtl8192cu/fw.h)11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c137
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h246
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c31
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h12
-rw-r--r--drivers/net/wireless/wl1251/main.c4
-rw-r--r--drivers/net/wireless/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c28
-rw-r--r--drivers/net/wireless/wl12xx/acx.h9
-rw-r--r--drivers/net/wireless/wl12xx/init.c2
-rw-r--r--drivers/net/wireless/wl12xx/main.c151
-rw-r--r--drivers/net/wireless/wl12xx/ps.c78
-rw-r--r--drivers/net/wireless/wl12xx/ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/rx.c6
-rw-r--r--drivers/net/wireless/wl12xx/rx.h4
-rw-r--r--drivers/net/wireless/wl12xx/tx.c237
-rw-r--r--drivers/net/wireless/wl12xx/tx.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h37
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c5
-rw-r--r--drivers/nfc/Kconfig2
-rw-r--r--drivers/nfc/pn544.c4
-rw-r--r--drivers/of/pdt.c112
-rw-r--r--drivers/pcmcia/pcmcia_resource.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.h1
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c1
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/asus_acpi.c8
-rw-r--r--drivers/platform/x86/dell-laptop.c24
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c116
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/pps/kapi.c2
-rw-r--r--drivers/rapidio/rio-sysfs.c12
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/wm831x-dcdc.c1
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c14
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kconfig4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig4
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c3
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.c28
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.c5
-rw-r--r--drivers/staging/pohmelfs/config.c2
-rw-r--r--drivers/staging/winbond/wbusb.c7
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/thermal/thermal_sys.c40
-rw-r--r--drivers/tty/serial/serial_cs.c1
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/gadget/f_phonet.c15
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c9
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-ring.c40
-rw-r--r--drivers/usb/host/xhci.c14
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/musb/musb_core.c1
-rw-r--r--drivers/usb/musb/musb_core.h17
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/usb_wwan.c15
-rw-r--r--drivers/usb/serial/visor.c12
-rw-r--r--drivers/video/uvesafb.c2
-rw-r--r--fs/afs/write.c1
-rw-r--r--fs/aio.c52
-rw-r--r--fs/block_dev.c19
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/extent-tree.c9
-rw-r--r--fs/btrfs/extent_io.c138
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/inode.c126
-rw-r--r--fs/btrfs/ioctl.c7
-rw-r--r--fs/btrfs/lzo.c21
-rw-r--r--fs/btrfs/relocation.c13
-rw-r--r--fs/btrfs/super.c7
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/ceph/dir.c5
-rw-r--r--fs/ceph/snap.c14
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/netmisc.c8
-rw-r--r--fs/cifs/sess.c8
-rw-r--r--fs/ecryptfs/dentry.c22
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h3
-rw-r--r--fs/ecryptfs/file.c1
-rw-r--r--fs/ecryptfs/inode.c138
-rw-r--r--fs/eventfd.c12
-rw-r--r--fs/eventpoll.c95
-rw-r--r--fs/exofs/namei.c8
-rw-r--r--fs/ext2/namei.c9
-rw-r--r--fs/fuse/dir.c7
-rw-r--r--fs/fuse/file.c52
-rw-r--r--fs/fuse/fuse_i.h6
-rw-r--r--fs/gfs2/main.c9
-rw-r--r--fs/hfs/dir.c50
-rw-r--r--fs/inode.c31
-rw-r--r--fs/internal.h2
-rw-r--r--fs/minix/namei.c8
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nilfs2/btnode.c5
-rw-r--r--fs/nilfs2/btnode.h1
-rw-r--r--fs/nilfs2/mdt.c4
-rw-r--r--fs/nilfs2/namei.c8
-rw-r--r--fs/nilfs2/page.c13
-rw-r--r--fs/nilfs2/page.h1
-rw-r--r--fs/nilfs2/segment.c3
-rw-r--r--fs/nilfs2/super.c2
-rw-r--r--fs/ocfs2/journal.h6
-rw-r--r--fs/ocfs2/refcounttree.c7
-rw-r--r--fs/ocfs2/super.c28
-rw-r--r--fs/partitions/ldm.c5
-rw-r--r--fs/proc/proc_devtree.c2
-rw-r--r--fs/reiserfs/namei.c2
-rw-r--r--fs/sysv/namei.c8
-rw-r--r--fs/udf/namei.c11
-rw-r--r--fs/ufs/namei.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_discard.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c11
-rw-r--r--fs/xfs/xfs_fsops.c3
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/keys/rxrpc-type.h1
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/blktrace_api.h1
-rw-r--r--include/linux/dcbnl.h99
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/mfd/wm8994/core.h1
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/netdevice.h13
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nl80211.h3
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_wakeup.h25
-rw-r--r--include/linux/rio_regs.h4
-rw-r--r--include/linux/thermal.h8
-rw-r--r--include/net/cfg80211.h5
-rw-r--r--include/net/dcbnl.h9
-rw-r--r--include/net/dst.h25
-rw-r--r--include/net/flow.h1
-rw-r--r--include/net/inet_sock.h23
-rw-r--r--include/net/ip.h16
-rw-r--r--include/net/ip_fib.h8
-rw-r--r--include/net/ip_vs.h18
-rw-r--r--include/net/ipv6.h28
-rw-r--r--include/net/mac80211.h21
-rw-r--r--include/net/route.h77
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/udp.h11
-rw-r--r--include/net/udplite.h12
-rw-r--r--include/net/xfrm.h1
-rw-r--r--include/pcmcia/ds.h1
-rw-r--r--include/sound/wm8903.h10
-rw-r--r--include/trace/events/block.h6
-rw-r--r--kernel/audit.c6
-rw-r--r--kernel/auditfilter.c10
-rw-r--r--kernel/irq/internals.h6
-rw-r--r--kernel/irq/irqdesc.c11
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/resend.c2
-rw-r--r--kernel/perf_event.c19
-rw-r--r--kernel/time/tick-broadcast.c10
-rw-r--r--kernel/time/tick-common.c6
-rw-r--r--kernel/time/tick-internal.h3
-rw-r--r--kernel/trace/blktrace.c16
-rw-r--r--lib/nlattr.c2
-rw-r--r--lib/swiotlb.c6
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c6
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/vmscan.c32
-rw-r--r--net/8021q/vlan_dev.c14
-rw-r--r--net/atm/clip.c6
-rw-r--r--net/batman-adv/aggregation.c8
-rw-r--r--net/batman-adv/aggregation.h4
-rw-r--r--net/batman-adv/bat_sysfs.c51
-rw-r--r--net/batman-adv/gateway_client.c140
-rw-r--r--net/batman-adv/hard-interface.c407
-rw-r--r--net/batman-adv/hard-interface.h15
-rw-r--r--net/batman-adv/hash.c26
-rw-r--r--net/batman-adv/hash.h112
-rw-r--r--net/batman-adv/icmp_socket.c40
-rw-r--r--net/batman-adv/main.c13
-rw-r--r--net/batman-adv/main.h12
-rw-r--r--net/batman-adv/originator.c252
-rw-r--r--net/batman-adv/originator.h50
-rw-r--r--net/batman-adv/routing.c983
-rw-r--r--net/batman-adv/routing.h25
-rw-r--r--net/batman-adv/send.c103
-rw-r--r--net/batman-adv/send.h8
-rw-r--r--net/batman-adv/soft-interface.c74
-rw-r--r--net/batman-adv/soft-interface.h3
-rw-r--r--net/batman-adv/translation-table.c205
-rw-r--r--net/batman-adv/types.h48
-rw-r--r--net/batman-adv/unicast.c93
-rw-r--r--net/batman-adv/unicast.h2
-rw-r--r--net/batman-adv/vis.c192
-rw-r--r--net/bluetooth/Kconfig6
-rw-r--r--net/bridge/br_multicast.c23
-rw-r--r--net/bridge/br_netfilter.c9
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/ceph/messenger.c62
-rw-r--r--net/core/dev.c120
-rw-r--r--net/core/dev_addr_lists.c2
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/dcb/dcbnl.c148
-rw-r--r--net/dccp/input.c7
-rw-r--r--net/dccp/ipv4.c27
-rw-r--r--net/dccp/ipv6.c65
-rw-r--r--net/decnet/dn_route.c15
-rw-r--r--net/dns_resolver/dns_key.c20
-rw-r--r--net/ipv4/af_inet.c30
-rw-r--r--net/ipv4/arp.c19
-rw-r--r--net/ipv4/datagram.c11
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/fib_lookup.h7
-rw-r--r--net/ipv4/fib_semantics.c126
-rw-r--r--net/ipv4/fib_trie.c51
-rw-r--r--net/ipv4/icmp.c176
-rw-r--r--net/ipv4/igmp.c16
-rw-r--r--net/ipv4/inet_connection_sock.c3
-rw-r--r--net/ipv4/inetpeer.c75
-rw-r--r--net/ipv4/ip_gre.c11
-rw-r--r--net/ipv4/ip_output.c296
-rw-r--r--net/ipv4/ipip.c7
-rw-r--r--net/ipv4/ipmr.c60
-rw-r--r--net/ipv4/netfilter.c18
-rw-r--r--net/ipv4/raw.c11
-rw-r--r--net/ipv4/route.c293
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_ipv4.c28
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/udp.c110
-rw-r--r--net/ipv4/xfrm4_policy.c20
-rw-r--r--net/ipv6/af_inet6.c17
-rw-r--r--net/ipv6/datagram.c15
-rw-r--r--net/ipv6/icmp.c118
-rw-r--r--net/ipv6/inet6_connection_sock.c25
-rw-r--r--net/ipv6/ip6_output.c82
-rw-r--r--net/ipv6/ip6_tunnel.c19
-rw-r--r--net/ipv6/mcast.c19
-rw-r--r--net/ipv6/ndisc.c8
-rw-r--r--net/ipv6/netfilter.c3
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c3
-rw-r--r--net/ipv6/raw.c15
-rw-r--r--net/ipv6/route.c37
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/syncookies.c7
-rw-r--r--net/ipv6/tcp_ipv6.c57
-rw-r--r--net/ipv6/udp.c15
-rw-r--r--net/ipv6/xfrm6_policy.c1
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_ip.c10
-rw-r--r--net/llc/llc_input.c25
-rw-r--r--net/mac80211/Kconfig2
-rw-r--r--net/mac80211/cfg.c68
-rw-r--r--net/mac80211/driver-ops.h35
-rw-r--r--net/mac80211/driver-trace.h33
-rw-r--r--net/mac80211/ht.c5
-rw-r--r--net/mac80211/ibss.c7
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/iface.c1
-rw-r--r--net/mac80211/mlme.c23
-rw-r--r--net/mac80211/rx.c15
-rw-r--r--net/mac80211/sta_info.h4
-rw-r--r--net/mac80211/status.c6
-rw-r--r--net/mac80211/tx.c164
-rw-r--r--net/netfilter/ipset/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c52
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c28
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c27
-rw-r--r--net/netfilter/ipvs/ip_vs_lc.c20
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_rr.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c25
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c22
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c59
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netfilter/nf_log.c4
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/xt_TEE.c3
-rw-r--r--net/netfilter/xt_conntrack.c5
-rw-r--r--net/netlabel/netlabel_user.h6
-rw-r--r--net/netlink/af_netlink.c27
-rw-r--r--net/packet/af_packet.c3
-rw-r--r--net/rxrpc/ar-input.c1
-rw-r--r--net/rxrpc/ar-key.c8
-rw-r--r--net/rxrpc/ar-peer.c7
-rw-r--r--net/sched/cls_route.c2
-rw-r--r--net/sched/em_meta.c2
-rw-r--r--net/sched/sch_fifo.c34
-rw-r--r--net/sched/sch_generic.c19
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/sctp/input.c3
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/protocol.c7
-rw-r--r--net/sctp/sm_make_chunk.c3
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/sctp/ulpqueue.c7
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/wireless/nl80211.c56
-rw-r--r--net/wireless/wext-compat.c4
-rw-r--r--net/xfrm/xfrm_policy.c66
-rw-r--r--net/xfrm/xfrm_user.c56
-rw-r--r--scripts/basic/fixdep.c12
-rw-r--r--security/commoncap.c3
-rw-r--r--security/selinux/hooks.c6
-rw-r--r--sound/core/jack.c1
-rw-r--r--sound/pci/au88x0/au88x0_core.c14
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_conexant.c68
-rw-r--r--sound/pci/hda/patch_sigmatel.c15
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/soc/codecs/cx20442.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/codecs/wm8903.h2
-rw-r--r--sound/soc/codecs/wm8994.c202
-rw-r--r--sound/soc/codecs/wm_hubs.c3
-rw-r--r--sound/soc/imx/eukrea-tlv320.c2
-rw-r--r--sound/soc/pxa/e740_wm9705.c4
-rw-r--r--sound/soc/pxa/e750_wm9705.c4
-rw-r--r--sound/soc/pxa/e800_wm9712.c4
-rw-r--r--sound/soc/pxa/em-x270.c4
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c4
-rw-r--r--sound/soc/pxa/palm27x.c4
-rw-r--r--sound/soc/pxa/tosa.c4
-rw-r--r--sound/soc/pxa/zylonite.c4
-rw-r--r--sound/soc/soc-dapm.c23
-rw-r--r--sound/usb/caiaq/audio.c2
-rw-r--r--sound/usb/caiaq/midi.c2
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/pcm.c7
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/perf/builtin-timechart.c6
-rw-r--r--tools/perf/util/hist.c7
-rw-r--r--tools/perf/util/svghelper.c6
760 files changed, 44510 insertions, 9249 deletions
diff --git a/.gitignore b/.gitignore
index 8faa6c02b39e..5d56a3fd0de6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ modules.builtin
28*.gz 28*.gz
29*.bz2 29*.bz2
30*.lzma 30*.lzma
31*.xz
31*.lzo 32*.lzo
32*.patch 33*.patch
33*.gcno 34*.gcno
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 5e87ad58c0b5..f51f28531b8d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -82,6 +82,11 @@
82 </sect1> 82 </sect1>
83 </chapter> 83 </chapter>
84 84
85 <chapter id="fs_events">
86 <title>Events based on file descriptors</title>
87!Efs/eventfd.c
88 </chapter>
89
85 <chapter id="sysfs"> 90 <chapter id="sysfs">
86 <title>The Filesystem for Exporting Kernel Objects</title> 91 <title>The Filesystem for Exporting Kernel Objects</title>
87!Efs/sysfs/file.c 92!Efs/sysfs/file.c
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 0e76ef12e4c6..a22ecf48f255 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -51,7 +51,8 @@ Supported chips:
51 * JEDEC JC 42.4 compliant temperature sensor chips 51 * JEDEC JC 42.4 compliant temperature sensor chips
52 Prefix: 'jc42' 52 Prefix: 'jc42'
53 Addresses scanned: I2C 0x18 - 0x1f 53 Addresses scanned: I2C 0x18 - 0x1f
54 Datasheet: - 54 Datasheet:
55 http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
55 56
56Author: 57Author:
57 Guenter Roeck <guenter.roeck@ericsson.com> 58 Guenter Roeck <guenter.roeck@ericsson.com>
@@ -60,7 +61,11 @@ Author:
60Description 61Description
61----------- 62-----------
62 63
63This driver implements support for JEDEC JC 42.4 compliant temperature sensors. 64This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
65which are used on many DDR3 memory modules for mobile devices and servers. Some
66systems use the sensor to prevent memory overheating by automatically throttling
67the memory controller.
68
64The driver auto-detects the chips listed above, but can be manually instantiated 69The driver auto-detects the chips listed above, but can be manually instantiated
65to support other JC 42.4 compliant chips. 70to support other JC 42.4 compliant chips.
66 71
@@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
81which applies to all limits. This register can be written by writing into 86which applies to all limits. This register can be written by writing into
82temp1_crit_hyst. Other hysteresis attributes are read-only. 87temp1_crit_hyst. Other hysteresis attributes are read-only.
83 88
89If the BIOS has configured the sensor for automatic temperature management, it
90is likely that it has locked the registers, i.e., that the temperature limits
91cannot be changed.
92
84Sysfs entries 93Sysfs entries
85------------- 94-------------
86 95
87temp1_input Temperature (RO) 96temp1_input Temperature (RO)
88temp1_min Minimum temperature (RW) 97temp1_min Minimum temperature (RO or RW)
89temp1_max Maximum temperature (RW) 98temp1_max Maximum temperature (RO or RW)
90temp1_crit Critical high temperature (RW) 99temp1_crit Critical high temperature (RO or RW)
91 100
92temp1_crit_hyst Critical hysteresis temperature (RW) 101temp1_crit_hyst Critical hysteresis temperature (RO or RW)
93temp1_max_hyst Maximum hysteresis temperature (RO) 102temp1_max_hyst Maximum hysteresis temperature (RO)
94 103
95temp1_min_alarm Temperature low alarm 104temp1_min_alarm Temperature low alarm
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 6526eee525a6..d2b56a4fd1f5 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -9,6 +9,8 @@ Supported chips:
9 Socket S1G3: Athlon II, Sempron, Turion II 9 Socket S1G3: Athlon II, Sempron, Turion II
10* AMD Family 11h processors: 10* AMD Family 11h processors:
11 Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra) 11 Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
12* AMD Family 12h processors: "Llano"
13* AMD Family 14h processors: "Brazos" (C/E/G-Series)
12 14
13 Prefix: 'k10temp' 15 Prefix: 'k10temp'
14 Addresses scanned: PCI space 16 Addresses scanned: PCI space
@@ -17,10 +19,14 @@ Supported chips:
17 http://support.amd.com/us/Processor_TechDocs/31116.pdf 19 http://support.amd.com/us/Processor_TechDocs/31116.pdf
18 BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors: 20 BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
19 http://support.amd.com/us/Processor_TechDocs/41256.pdf 21 http://support.amd.com/us/Processor_TechDocs/41256.pdf
22 BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
23 http://support.amd.com/us/Processor_TechDocs/43170.pdf
20 Revision Guide for AMD Family 10h Processors: 24 Revision Guide for AMD Family 10h Processors:
21 http://support.amd.com/us/Processor_TechDocs/41322.pdf 25 http://support.amd.com/us/Processor_TechDocs/41322.pdf
22 Revision Guide for AMD Family 11h Processors: 26 Revision Guide for AMD Family 11h Processors:
23 http://support.amd.com/us/Processor_TechDocs/41788.pdf 27 http://support.amd.com/us/Processor_TechDocs/41788.pdf
28 Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
29 http://support.amd.com/us/Processor_TechDocs/47534.pdf
24 AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks: 30 AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
25 http://support.amd.com/us/Processor_TechDocs/43373.pdf 31 http://support.amd.com/us/Processor_TechDocs/43373.pdf
26 AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet: 32 AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@@ -34,7 +40,7 @@ Description
34----------- 40-----------
35 41
36This driver permits reading of the internal temperature sensor of AMD 42This driver permits reading of the internal temperature sensor of AMD
37Family 10h and 11h processors. 43Family 10h/11h/12h/14h processors.
38 44
39All these processors have a sensor, but on those for Socket F or AM2+, 45All these processors have a sensor, but on those for Socket F or AM2+,
40the sensor may return inconsistent values (erratum 319). The driver 46the sensor may return inconsistent values (erratum 319). The driver
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 89835a4766a6..f4a04c0c7edc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture
144and is between 256 and 4096 characters. It is defined in the file 144and is between 256 and 4096 characters. It is defined in the file
145./include/asm/setup.h as COMMAND_LINE_SIZE. 145./include/asm/setup.h as COMMAND_LINE_SIZE.
146 146
147Finally, the [KMG] suffix is commonly described after a number of kernel
148parameter values. These 'K', 'M', and 'G' letters represent the _binary_
149multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
150bytes respectively. Such letter suffixes can also be entirely omitted.
151
147 152
148 acpi= [HW,ACPI,X86] 153 acpi= [HW,ACPI,X86]
149 Advanced Configuration and Power Interface 154 Advanced Configuration and Power Interface
@@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file
545 Format: 550 Format:
546 <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>] 551 <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
547 552
548 crashkernel=nn[KMG]@ss[KMG] 553 crashkernel=size[KMG][@offset[KMG]]
549 [KNL] Reserve a chunk of physical memory to 554 [KNL] Using kexec, Linux can switch to a 'crash kernel'
550 hold a kernel to switch to with kexec on panic. 555 upon panic. This parameter reserves the physical
556 memory region [offset, offset + size] for that kernel
557 image. If '@offset' is omitted, then a suitable offset
558 is selected automatically. Check
559 Documentation/kdump/kdump.txt for further details.
551 560
552 crashkernel=range1:size1[,range2:size2,...][@offset] 561 crashkernel=range1:size1[,range2:size2,...][@offset]
553 [KNL] Same as above, but depends on the memory 562 [KNL] Same as above, but depends on the memory
554 in the running system. The syntax of range is 563 in the running system. The syntax of range is
555 start-[end] where start and end are both 564 start-[end] where start and end are both
556 a memory unit (amount[KMG]). See also 565 a memory unit (amount[KMG]). See also
557 Documentation/kdump/kdump.txt for a example. 566 Documentation/kdump/kdump.txt for an example.
558 567
559 cs89x0_dma= [HW,NET] 568 cs89x0_dma= [HW,NET]
560 Format: <dma> 569 Format: <dma>
@@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file
1262 6 (KERN_INFO) informational 1271 6 (KERN_INFO) informational
1263 7 (KERN_DEBUG) debug-level messages 1272 7 (KERN_DEBUG) debug-level messages
1264 1273
1265 log_buf_len=n Sets the size of the printk ring buffer, in bytes. 1274 log_buf_len=n[KMG] Sets the size of the printk ring buffer,
1266 Format: { n | nk | nM } 1275 in bytes. n must be a power of two. The default
1267 n must be a power of two. The default size 1276 size is set in the kernel config file.
1268 is set in the kernel config file.
1269 1277
1270 logo.nologo [FB] Disables display of the built-in Linux logo. 1278 logo.nologo [FB] Disables display of the built-in Linux logo.
1271 This may be used to provide more screen space for 1279 This may be used to provide more screen space for
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099b8fc8..4edd78dfb362 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -40,8 +40,6 @@ decnet.txt
40 - info on using the DECnet networking layer in Linux. 40 - info on using the DECnet networking layer in Linux.
41depca.txt 41depca.txt
42 - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver 42 - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
43dgrs.txt
44 - the Digi International RightSwitch SE-X Ethernet driver
45dmfe.txt 43dmfe.txt
46 - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. 44 - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
47e100.txt 45e100.txt
@@ -50,8 +48,6 @@ e1000.txt
50 - info on Intel's E1000 line of gigabit ethernet boards 48 - info on Intel's E1000 line of gigabit ethernet boards
51eql.txt 49eql.txt
52 - serial IP load balancing 50 - serial IP load balancing
53ethertap.txt
54 - the Ethertap user space packet reception and transmission driver
55ewrk3.txt 51ewrk3.txt
56 - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver 52 - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
57filter.txt 53filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
104 - TUN/TAP device driver, allowing user space Rx/Tx of packets. 100 - TUN/TAP device driver, allowing user space Rx/Tx of packets.
105vortex.txt 101vortex.txt
106 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. 102 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
107wavelan.txt
108 - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
109x25.txt 103x25.txt
110 - general info on X.25 development. 104 - general info on X.25 development.
111x25-iface.txt 105x25-iface.txt
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index aefd1e681804..04ca06325b08 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
61 create dns_resolver foo:* * /usr/sbin/dns.foo %k 61 create dns_resolver foo:* * /usr/sbin/dns.foo %k
62 62
63 63
64
65===== 64=====
66USAGE 65USAGE
67===== 66=====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
104 returned also. 103 returned also.
105 104
106 105
106===============================
107READING DNS KEYS FROM USERSPACE
108===============================
109
110Keys of dns_resolver type can be read from userspace using keyctl_read() or
111"keyctl read/print/pipe".
112
113
107========= 114=========
108MECHANISM 115MECHANISM
109========= 116=========
diff --git a/MAINTAINERS b/MAINTAINERS
index 0d83e5898233..75760e796ae3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -885,7 +885,7 @@ S: Supported
885 885
886ARM/QUALCOMM MSM MACHINE SUPPORT 886ARM/QUALCOMM MSM MACHINE SUPPORT
887M: David Brown <davidb@codeaurora.org> 887M: David Brown <davidb@codeaurora.org>
888M: Daniel Walker <dwalker@codeaurora.org> 888M: Daniel Walker <dwalker@fifo99.com>
889M: Bryan Huntsman <bryanh@codeaurora.org> 889M: Bryan Huntsman <bryanh@codeaurora.org>
890L: linux-arm-msm@vger.kernel.org 890L: linux-arm-msm@vger.kernel.org
891F: arch/arm/mach-msm/ 891F: arch/arm/mach-msm/
@@ -1467,6 +1467,7 @@ F: include/net/bluetooth/
1467 1467
1468BONDING DRIVER 1468BONDING DRIVER
1469M: Jay Vosburgh <fubar@us.ibm.com> 1469M: Jay Vosburgh <fubar@us.ibm.com>
1470M: Andy Gospodarek <andy@greyhouse.net>
1470L: netdev@vger.kernel.org 1471L: netdev@vger.kernel.org
1471W: http://sourceforge.net/projects/bonding/ 1472W: http://sourceforge.net/projects/bonding/
1472S: Supported 1473S: Supported
@@ -1692,6 +1693,13 @@ M: Andy Whitcroft <apw@canonical.com>
1692S: Supported 1693S: Supported
1693F: scripts/checkpatch.pl 1694F: scripts/checkpatch.pl
1694 1695
1696CHINESE DOCUMENTATION
1697M: Harry Wei <harryxiyou@gmail.com>
1698L: xiyoulinuxkernelgroup@googlegroups.com
1699L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
1700S: Maintained
1701F: Documentation/zh_CN/
1702
1695CISCO VIC ETHERNET NIC DRIVER 1703CISCO VIC ETHERNET NIC DRIVER
1696M: Christian Benvenuti <benve@cisco.com> 1704M: Christian Benvenuti <benve@cisco.com>
1697M: Vasanthy Kolluri <vkolluri@cisco.com> 1705M: Vasanthy Kolluri <vkolluri@cisco.com>
@@ -2027,7 +2035,7 @@ F: Documentation/scsi/dc395x.txt
2027F: drivers/scsi/dc395x.* 2035F: drivers/scsi/dc395x.*
2028 2036
2029DCCP PROTOCOL 2037DCCP PROTOCOL
2030M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 2038M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
2031L: dccp@vger.kernel.org 2039L: dccp@vger.kernel.org
2032W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp 2040W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
2033S: Maintained 2041S: Maintained
@@ -2874,7 +2882,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com>
2874L: lm-sensors@lm-sensors.org 2882L: lm-sensors@lm-sensors.org
2875W: http://www.lm-sensors.org/ 2883W: http://www.lm-sensors.org/
2876T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ 2884T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
2877T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
2878T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git 2885T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
2879S: Maintained 2886S: Maintained
2880F: Documentation/hwmon/ 2887F: Documentation/hwmon/
@@ -5269,7 +5276,7 @@ S: Maintained
5269F: drivers/net/wireless/rtl818x/rtl8180/ 5276F: drivers/net/wireless/rtl818x/rtl8180/
5270 5277
5271RTL8187 WIRELESS DRIVER 5278RTL8187 WIRELESS DRIVER
5272M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> 5279M: Herton Ronaldo Krzesinski <herton@canonical.com>
5273M: Hin-Tak Leung <htl10@users.sourceforge.net> 5280M: Hin-Tak Leung <htl10@users.sourceforge.net>
5274M: Larry Finger <Larry.Finger@lwfinger.net> 5281M: Larry Finger <Larry.Finger@lwfinger.net>
5275L: linux-wireless@vger.kernel.org 5282L: linux-wireless@vger.kernel.org
@@ -6107,7 +6114,7 @@ S: Maintained
6107F: security/tomoyo/ 6114F: security/tomoyo/
6108 6115
6109TOPSTAR LAPTOP EXTRAS DRIVER 6116TOPSTAR LAPTOP EXTRAS DRIVER
6110M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> 6117M: Herton Ronaldo Krzesinski <herton@canonical.com>
6111L: platform-driver-x86@vger.kernel.org 6118L: platform-driver-x86@vger.kernel.org
6112S: Maintained 6119S: Maintained
6113F: drivers/platform/x86/topstar-laptop.c 6120F: drivers/platform/x86/topstar-laptop.c
diff --git a/Makefile b/Makefile
index 5e40aa2acbff..2f7d92255b57 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 38 3SUBLEVEL = 38
4EXTRAVERSION = -rc5 4EXTRAVERSION = -rc7
5NAME = Flesh-Eating Bats with Fangs 5NAME = Flesh-Eating Bats with Fangs
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 47f63d480141..cc31bec2e316 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -11,6 +11,7 @@ config ALPHA
11 select HAVE_GENERIC_HARDIRQS 11 select HAVE_GENERIC_HARDIRQS
12 select GENERIC_IRQ_PROBE 12 select GENERIC_IRQ_PROBE
13 select AUTO_IRQ_AFFINITY if SMP 13 select AUTO_IRQ_AFFINITY if SMP
14 select GENERIC_HARDIRQS_NO_DEPRECATED
14 help 15 help
15 The Alpha is a 64-bit general-purpose processor designed and 16 The Alpha is a 64-bit general-purpose processor designed and
16 marketed by the Digital Equipment Corporation of blessed memory, 17 marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 9ab234f48dd8..a19d60082299 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
44 44
45int irq_select_affinity(unsigned int irq) 45int irq_select_affinity(unsigned int irq)
46{ 46{
47 struct irq_desc *desc = irq_to_desc[irq]; 47 struct irq_data *data = irq_get_irq_data(irq);
48 struct irq_chip *chip;
48 static int last_cpu; 49 static int last_cpu;
49 int cpu = last_cpu + 1; 50 int cpu = last_cpu + 1;
50 51
51 if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq]) 52 if (!data)
53 return 1;
54 chip = irq_data_get_irq_chip(data);
55
56 if (!chip->irq_set_affinity || irq_user_affinity[irq])
52 return 1; 57 return 1;
53 58
54 while (!cpu_possible(cpu) || 59 while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
56 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 61 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
57 last_cpu = cpu; 62 last_cpu = cpu;
58 63
59 cpumask_copy(desc->affinity, cpumask_of(cpu)); 64 cpumask_copy(data->affinity, cpumask_of(cpu));
60 get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu)); 65 chip->irq_set_affinity(data, cpumask_of(cpu), false);
61 return 0; 66 return 0;
62} 67}
63#endif /* CONFIG_SMP */ 68#endif /* CONFIG_SMP */
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 2d0679b60939..411ca11d0a18 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
228void __init 228void __init
229init_rtc_irq(void) 229init_rtc_irq(void)
230{ 230{
231 struct irq_desc *desc = irq_to_desc(RTC_IRQ); 231 set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
232 232 handle_simple_irq, "RTC");
233 if (desc) { 233 setup_irq(RTC_IRQ, &timer_irqaction);
234 desc->status |= IRQ_DISABLED;
235 set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
236 handle_simple_irq, "RTC");
237 setup_irq(RTC_IRQ, &timer_irqaction);
238 }
239} 234}
240 235
241/* Dummy irqactions. */ 236/* Dummy irqactions. */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 956ea0ed1694..c7cc9813e45f 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
33} 33}
34 34
35inline void 35inline void
36i8259a_enable_irq(unsigned int irq) 36i8259a_enable_irq(struct irq_data *d)
37{ 37{
38 spin_lock(&i8259_irq_lock); 38 spin_lock(&i8259_irq_lock);
39 i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 39 i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
40 spin_unlock(&i8259_irq_lock); 40 spin_unlock(&i8259_irq_lock);
41} 41}
42 42
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
47} 47}
48 48
49void 49void
50i8259a_disable_irq(unsigned int irq) 50i8259a_disable_irq(struct irq_data *d)
51{ 51{
52 spin_lock(&i8259_irq_lock); 52 spin_lock(&i8259_irq_lock);
53 __i8259a_disable_irq(irq); 53 __i8259a_disable_irq(d->irq);
54 spin_unlock(&i8259_irq_lock); 54 spin_unlock(&i8259_irq_lock);
55} 55}
56 56
57void 57void
58i8259a_mask_and_ack_irq(unsigned int irq) 58i8259a_mask_and_ack_irq(struct irq_data *d)
59{ 59{
60 unsigned int irq = d->irq;
61
60 spin_lock(&i8259_irq_lock); 62 spin_lock(&i8259_irq_lock);
61 __i8259a_disable_irq(irq); 63 __i8259a_disable_irq(irq);
62 64
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
71 73
72struct irq_chip i8259a_irq_type = { 74struct irq_chip i8259a_irq_type = {
73 .name = "XT-PIC", 75 .name = "XT-PIC",
74 .unmask = i8259a_enable_irq, 76 .irq_unmask = i8259a_enable_irq,
75 .mask = i8259a_disable_irq, 77 .irq_mask = i8259a_disable_irq,
76 .mask_ack = i8259a_mask_and_ack_irq, 78 .irq_mask_ack = i8259a_mask_and_ack_irq,
77}; 79};
78 80
79void __init 81void __init
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index b63ccd7386f1..d507a234b05d 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
31 31
32extern void common_init_isa_dma(void); 32extern void common_init_isa_dma(void);
33 33
34extern void i8259a_enable_irq(unsigned int); 34extern void i8259a_enable_irq(struct irq_data *d);
35extern void i8259a_disable_irq(unsigned int); 35extern void i8259a_disable_irq(struct irq_data *d);
36extern void i8259a_mask_and_ack_irq(unsigned int); 36extern void i8259a_mask_and_ack_irq(struct irq_data *d);
37extern unsigned int i8259a_startup_irq(unsigned int);
38extern void i8259a_end_irq(unsigned int);
39extern struct irq_chip i8259a_irq_type; 37extern struct irq_chip i8259a_irq_type;
40extern void init_i8259a_irqs(void); 38extern void init_i8259a_irqs(void);
41 39
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 2863458c853e..b30227fa7f5f 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
29} 29}
30 30
31static inline void 31static inline void
32pyxis_enable_irq(unsigned int irq) 32pyxis_enable_irq(struct irq_data *d)
33{ 33{
34 pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 34 pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
35} 35}
36 36
37static void 37static void
38pyxis_disable_irq(unsigned int irq) 38pyxis_disable_irq(struct irq_data *d)
39{ 39{
40 pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 40 pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
41} 41}
42 42
43static void 43static void
44pyxis_mask_and_ack_irq(unsigned int irq) 44pyxis_mask_and_ack_irq(struct irq_data *d)
45{ 45{
46 unsigned long bit = 1UL << (irq - 16); 46 unsigned long bit = 1UL << (d->irq - 16);
47 unsigned long mask = cached_irq_mask &= ~bit; 47 unsigned long mask = cached_irq_mask &= ~bit;
48 48
49 /* Disable the interrupt. */ 49 /* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
58 58
59static struct irq_chip pyxis_irq_type = { 59static struct irq_chip pyxis_irq_type = {
60 .name = "PYXIS", 60 .name = "PYXIS",
61 .mask_ack = pyxis_mask_and_ack_irq, 61 .irq_mask_ack = pyxis_mask_and_ack_irq,
62 .mask = pyxis_disable_irq, 62 .irq_mask = pyxis_disable_irq,
63 .unmask = pyxis_enable_irq, 63 .irq_unmask = pyxis_enable_irq,
64}; 64};
65 65
66void 66void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
103 if ((ignore_mask >> i) & 1) 103 if ((ignore_mask >> i) & 1)
104 continue; 104 continue;
105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); 105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
106 irq_to_desc(i)->status |= IRQ_LEVEL; 106 irq_set_status_flags(i, IRQ_LEVEL);
107 } 107 }
108 108
109 setup_irq(16+7, &isa_cascade_irqaction); 109 setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 0e57e828b413..82a47bba41c4 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -18,27 +18,27 @@
18DEFINE_SPINLOCK(srm_irq_lock); 18DEFINE_SPINLOCK(srm_irq_lock);
19 19
20static inline void 20static inline void
21srm_enable_irq(unsigned int irq) 21srm_enable_irq(struct irq_data *d)
22{ 22{
23 spin_lock(&srm_irq_lock); 23 spin_lock(&srm_irq_lock);
24 cserve_ena(irq - 16); 24 cserve_ena(d->irq - 16);
25 spin_unlock(&srm_irq_lock); 25 spin_unlock(&srm_irq_lock);
26} 26}
27 27
28static void 28static void
29srm_disable_irq(unsigned int irq) 29srm_disable_irq(struct irq_data *d)
30{ 30{
31 spin_lock(&srm_irq_lock); 31 spin_lock(&srm_irq_lock);
32 cserve_dis(irq - 16); 32 cserve_dis(d->irq - 16);
33 spin_unlock(&srm_irq_lock); 33 spin_unlock(&srm_irq_lock);
34} 34}
35 35
36/* Handle interrupts from the SRM, assuming no additional weirdness. */ 36/* Handle interrupts from the SRM, assuming no additional weirdness. */
37static struct irq_chip srm_irq_type = { 37static struct irq_chip srm_irq_type = {
38 .name = "SRM", 38 .name = "SRM",
39 .unmask = srm_enable_irq, 39 .irq_unmask = srm_enable_irq,
40 .mask = srm_disable_irq, 40 .irq_mask = srm_disable_irq,
41 .mask_ack = srm_disable_irq, 41 .irq_mask_ack = srm_disable_irq,
42}; 42};
43 43
44void __init 44void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
52 if (i < 64 && ((ignore_mask >> i) & 1)) 52 if (i < 64 && ((ignore_mask >> i) & 1))
53 continue; 53 continue;
54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); 54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
55 irq_to_desc(i)->status |= IRQ_LEVEL; 55 irq_set_status_flags(i, IRQ_LEVEL);
56 } 56 }
57} 57}
58 58
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 7bef61768236..88d95e872f55 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
44} 44}
45 45
46static inline void 46static inline void
47alcor_enable_irq(unsigned int irq) 47alcor_enable_irq(struct irq_data *d)
48{ 48{
49 alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 49 alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
50} 50}
51 51
52static void 52static void
53alcor_disable_irq(unsigned int irq) 53alcor_disable_irq(struct irq_data *d)
54{ 54{
55 alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 55 alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
56} 56}
57 57
58static void 58static void
59alcor_mask_and_ack_irq(unsigned int irq) 59alcor_mask_and_ack_irq(struct irq_data *d)
60{ 60{
61 alcor_disable_irq(irq); 61 alcor_disable_irq(d);
62 62
63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
64 *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); 64 *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
65 *(vuip)GRU_INT_CLEAR = 0; mb(); 65 *(vuip)GRU_INT_CLEAR = 0; mb();
66} 66}
67 67
68static void 68static void
69alcor_isa_mask_and_ack_irq(unsigned int irq) 69alcor_isa_mask_and_ack_irq(struct irq_data *d)
70{ 70{
71 i8259a_mask_and_ack_irq(irq); 71 i8259a_mask_and_ack_irq(d);
72 72
73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); 74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
77 77
78static struct irq_chip alcor_irq_type = { 78static struct irq_chip alcor_irq_type = {
79 .name = "ALCOR", 79 .name = "ALCOR",
80 .unmask = alcor_enable_irq, 80 .irq_unmask = alcor_enable_irq,
81 .mask = alcor_disable_irq, 81 .irq_mask = alcor_disable_irq,
82 .mask_ack = alcor_mask_and_ack_irq, 82 .irq_mask_ack = alcor_mask_and_ack_irq,
83}; 83};
84 84
85static void 85static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
126 if (i >= 16+20 && i <= 16+30) 126 if (i >= 16+20 && i <= 16+30)
127 continue; 127 continue;
128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); 128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
129 irq_to_desc(i)->status |= IRQ_LEVEL; 129 irq_set_status_flags(i, IRQ_LEVEL);
130 } 130 }
131 i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; 131 i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
132 132
133 init_i8259a_irqs(); 133 init_i8259a_irqs();
134 common_init_isa_dma(); 134 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index b0c916493aea..57eb6307bc27 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
46} 46}
47 47
48static inline void 48static inline void
49cabriolet_enable_irq(unsigned int irq) 49cabriolet_enable_irq(struct irq_data *d)
50{ 50{
51 cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); 51 cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
52} 52}
53 53
54static void 54static void
55cabriolet_disable_irq(unsigned int irq) 55cabriolet_disable_irq(struct irq_data *d)
56{ 56{
57 cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); 57 cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
58} 58}
59 59
60static struct irq_chip cabriolet_irq_type = { 60static struct irq_chip cabriolet_irq_type = {
61 .name = "CABRIOLET", 61 .name = "CABRIOLET",
62 .unmask = cabriolet_enable_irq, 62 .irq_unmask = cabriolet_enable_irq,
63 .mask = cabriolet_disable_irq, 63 .irq_mask = cabriolet_disable_irq,
64 .mask_ack = cabriolet_disable_irq, 64 .irq_mask_ack = cabriolet_disable_irq,
65}; 65};
66 66
67static void 67static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
107 for (i = 16; i < 35; ++i) { 107 for (i = 16; i < 35; ++i) {
108 set_irq_chip_and_handler(i, &cabriolet_irq_type, 108 set_irq_chip_and_handler(i, &cabriolet_irq_type,
109 handle_level_irq); 109 handle_level_irq);
110 irq_to_desc(i)->status |= IRQ_LEVEL; 110 irq_set_status_flags(i, IRQ_LEVEL);
111 } 111 }
112 } 112 }
113 113
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index edad5f759ccd..481df4ecb651 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
98} 98}
99 99
100static void 100static void
101dp264_enable_irq(unsigned int irq) 101dp264_enable_irq(struct irq_data *d)
102{ 102{
103 spin_lock(&dp264_irq_lock); 103 spin_lock(&dp264_irq_lock);
104 cached_irq_mask |= 1UL << irq; 104 cached_irq_mask |= 1UL << d->irq;
105 tsunami_update_irq_hw(cached_irq_mask); 105 tsunami_update_irq_hw(cached_irq_mask);
106 spin_unlock(&dp264_irq_lock); 106 spin_unlock(&dp264_irq_lock);
107} 107}
108 108
109static void 109static void
110dp264_disable_irq(unsigned int irq) 110dp264_disable_irq(struct irq_data *d)
111{ 111{
112 spin_lock(&dp264_irq_lock); 112 spin_lock(&dp264_irq_lock);
113 cached_irq_mask &= ~(1UL << irq); 113 cached_irq_mask &= ~(1UL << d->irq);
114 tsunami_update_irq_hw(cached_irq_mask); 114 tsunami_update_irq_hw(cached_irq_mask);
115 spin_unlock(&dp264_irq_lock); 115 spin_unlock(&dp264_irq_lock);
116} 116}
117 117
118static void 118static void
119clipper_enable_irq(unsigned int irq) 119clipper_enable_irq(struct irq_data *d)
120{ 120{
121 spin_lock(&dp264_irq_lock); 121 spin_lock(&dp264_irq_lock);
122 cached_irq_mask |= 1UL << (irq - 16); 122 cached_irq_mask |= 1UL << (d->irq - 16);
123 tsunami_update_irq_hw(cached_irq_mask); 123 tsunami_update_irq_hw(cached_irq_mask);
124 spin_unlock(&dp264_irq_lock); 124 spin_unlock(&dp264_irq_lock);
125} 125}
126 126
127static void 127static void
128clipper_disable_irq(unsigned int irq) 128clipper_disable_irq(struct irq_data *d)
129{ 129{
130 spin_lock(&dp264_irq_lock); 130 spin_lock(&dp264_irq_lock);
131 cached_irq_mask &= ~(1UL << (irq - 16)); 131 cached_irq_mask &= ~(1UL << (d->irq - 16));
132 tsunami_update_irq_hw(cached_irq_mask); 132 tsunami_update_irq_hw(cached_irq_mask);
133 spin_unlock(&dp264_irq_lock); 133 spin_unlock(&dp264_irq_lock);
134} 134}
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
149} 149}
150 150
151static int 151static int
152dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) 152dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
153{ 153 bool force)
154{
154 spin_lock(&dp264_irq_lock); 155 spin_lock(&dp264_irq_lock);
155 cpu_set_irq_affinity(irq, *affinity); 156 cpu_set_irq_affinity(d->irq, *affinity);
156 tsunami_update_irq_hw(cached_irq_mask); 157 tsunami_update_irq_hw(cached_irq_mask);
157 spin_unlock(&dp264_irq_lock); 158 spin_unlock(&dp264_irq_lock);
158 159
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
160} 161}
161 162
162static int 163static int
163clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) 164clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
164{ 165 bool force)
166{
165 spin_lock(&dp264_irq_lock); 167 spin_lock(&dp264_irq_lock);
166 cpu_set_irq_affinity(irq - 16, *affinity); 168 cpu_set_irq_affinity(d->irq - 16, *affinity);
167 tsunami_update_irq_hw(cached_irq_mask); 169 tsunami_update_irq_hw(cached_irq_mask);
168 spin_unlock(&dp264_irq_lock); 170 spin_unlock(&dp264_irq_lock);
169 171
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
171} 173}
172 174
173static struct irq_chip dp264_irq_type = { 175static struct irq_chip dp264_irq_type = {
174 .name = "DP264", 176 .name = "DP264",
175 .unmask = dp264_enable_irq, 177 .irq_unmask = dp264_enable_irq,
176 .mask = dp264_disable_irq, 178 .irq_mask = dp264_disable_irq,
177 .mask_ack = dp264_disable_irq, 179 .irq_mask_ack = dp264_disable_irq,
178 .set_affinity = dp264_set_affinity, 180 .irq_set_affinity = dp264_set_affinity,
179}; 181};
180 182
181static struct irq_chip clipper_irq_type = { 183static struct irq_chip clipper_irq_type = {
182 .name = "CLIPPER", 184 .name = "CLIPPER",
183 .unmask = clipper_enable_irq, 185 .irq_unmask = clipper_enable_irq,
184 .mask = clipper_disable_irq, 186 .irq_mask = clipper_disable_irq,
185 .mask_ack = clipper_disable_irq, 187 .irq_mask_ack = clipper_disable_irq,
186 .set_affinity = clipper_set_affinity, 188 .irq_set_affinity = clipper_set_affinity,
187}; 189};
188 190
189static void 191static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
268{ 270{
269 long i; 271 long i;
270 for (i = imin; i <= imax; ++i) { 272 for (i = imin; i <= imax; ++i) {
271 irq_to_desc(i)->status |= IRQ_LEVEL;
272 set_irq_chip_and_handler(i, ops, handle_level_irq); 273 set_irq_chip_and_handler(i, ops, handle_level_irq);
274 irq_set_status_flags(i, IRQ_LEVEL);
273 } 275 }
274} 276}
275 277
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ae5f29d127b0..402e908ffb3e 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
44} 44}
45 45
46static inline void 46static inline void
47eb64p_enable_irq(unsigned int irq) 47eb64p_enable_irq(struct irq_data *d)
48{ 48{
49 eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 49 eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
50} 50}
51 51
52static void 52static void
53eb64p_disable_irq(unsigned int irq) 53eb64p_disable_irq(struct irq_data *d)
54{ 54{
55 eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); 55 eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
56} 56}
57 57
58static struct irq_chip eb64p_irq_type = { 58static struct irq_chip eb64p_irq_type = {
59 .name = "EB64P", 59 .name = "EB64P",
60 .unmask = eb64p_enable_irq, 60 .irq_unmask = eb64p_enable_irq,
61 .mask = eb64p_disable_irq, 61 .irq_mask = eb64p_disable_irq,
62 .mask_ack = eb64p_disable_irq, 62 .irq_mask_ack = eb64p_disable_irq,
63}; 63};
64 64
65static void 65static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
118 init_i8259a_irqs(); 118 init_i8259a_irqs();
119 119
120 for (i = 16; i < 32; ++i) { 120 for (i = 16; i < 32; ++i) {
121 irq_to_desc(i)->status |= IRQ_LEVEL;
122 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); 121 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
123 } 122 irq_set_status_flags(i, IRQ_LEVEL);
123 }
124 124
125 common_init_isa_dma(); 125 common_init_isa_dma();
126 setup_irq(16+5, &isa_cascade_irqaction); 126 setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 1121bc5c6c6c..0b44a54c1522 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
51} 51}
52 52
53static inline void 53static inline void
54eiger_enable_irq(unsigned int irq) 54eiger_enable_irq(struct irq_data *d)
55{ 55{
56 unsigned int irq = d->irq;
56 unsigned long mask; 57 unsigned long mask;
57 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 58 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
58 eiger_update_irq_hw(irq, mask); 59 eiger_update_irq_hw(irq, mask);
59} 60}
60 61
61static void 62static void
62eiger_disable_irq(unsigned int irq) 63eiger_disable_irq(struct irq_data *d)
63{ 64{
65 unsigned int irq = d->irq;
64 unsigned long mask; 66 unsigned long mask;
65 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 67 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
66 eiger_update_irq_hw(irq, mask); 68 eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
68 70
69static struct irq_chip eiger_irq_type = { 71static struct irq_chip eiger_irq_type = {
70 .name = "EIGER", 72 .name = "EIGER",
71 .unmask = eiger_enable_irq, 73 .irq_unmask = eiger_enable_irq,
72 .mask = eiger_disable_irq, 74 .irq_mask = eiger_disable_irq,
73 .mask_ack = eiger_disable_irq, 75 .irq_mask_ack = eiger_disable_irq,
74}; 76};
75 77
76static void 78static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
136 init_i8259a_irqs(); 138 init_i8259a_irqs();
137 139
138 for (i = 16; i < 128; ++i) { 140 for (i = 16; i < 128; ++i) {
139 irq_to_desc(i)->status |= IRQ_LEVEL;
140 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); 141 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
142 irq_set_status_flags(i, IRQ_LEVEL);
141 } 143 }
142} 144}
143 145
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 34f55e03d331..00341b75c8b2 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -63,34 +63,34 @@
63 */ 63 */
64 64
65static void 65static void
66jensen_local_enable(unsigned int irq) 66jensen_local_enable(struct irq_data *d)
67{ 67{
68 /* the parport is really hw IRQ 1, silly Jensen. */ 68 /* the parport is really hw IRQ 1, silly Jensen. */
69 if (irq == 7) 69 if (d->irq == 7)
70 i8259a_enable_irq(1); 70 i8259a_enable_irq(d);
71} 71}
72 72
73static void 73static void
74jensen_local_disable(unsigned int irq) 74jensen_local_disable(struct irq_data *d)
75{ 75{
76 /* the parport is really hw IRQ 1, silly Jensen. */ 76 /* the parport is really hw IRQ 1, silly Jensen. */
77 if (irq == 7) 77 if (d->irq == 7)
78 i8259a_disable_irq(1); 78 i8259a_disable_irq(d);
79} 79}
80 80
81static void 81static void
82jensen_local_mask_ack(unsigned int irq) 82jensen_local_mask_ack(struct irq_data *d)
83{ 83{
84 /* the parport is really hw IRQ 1, silly Jensen. */ 84 /* the parport is really hw IRQ 1, silly Jensen. */
85 if (irq == 7) 85 if (d->irq == 7)
86 i8259a_mask_and_ack_irq(1); 86 i8259a_mask_and_ack_irq(d);
87} 87}
88 88
89static struct irq_chip jensen_local_irq_type = { 89static struct irq_chip jensen_local_irq_type = {
90 .name = "LOCAL", 90 .name = "LOCAL",
91 .unmask = jensen_local_enable, 91 .irq_unmask = jensen_local_enable,
92 .mask = jensen_local_disable, 92 .irq_mask = jensen_local_disable,
93 .mask_ack = jensen_local_mask_ack, 93 .irq_mask_ack = jensen_local_mask_ack,
94}; 94};
95 95
96static void 96static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 2bfc9f1b1ddc..e61910734e41 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
104} 104}
105 105
106static void 106static void
107io7_enable_irq(unsigned int irq) 107io7_enable_irq(struct irq_data *d)
108{ 108{
109 volatile unsigned long *ctl; 109 volatile unsigned long *ctl;
110 unsigned int irq = d->irq;
110 struct io7 *io7; 111 struct io7 *io7;
111 112
112 ctl = io7_get_irq_ctl(irq, &io7); 113 ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
115 __func__, irq); 116 __func__, irq);
116 return; 117 return;
117 } 118 }
118 119
119 spin_lock(&io7->irq_lock); 120 spin_lock(&io7->irq_lock);
120 *ctl |= 1UL << 24; 121 *ctl |= 1UL << 24;
121 mb(); 122 mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
124} 125}
125 126
126static void 127static void
127io7_disable_irq(unsigned int irq) 128io7_disable_irq(struct irq_data *d)
128{ 129{
129 volatile unsigned long *ctl; 130 volatile unsigned long *ctl;
131 unsigned int irq = d->irq;
130 struct io7 *io7; 132 struct io7 *io7;
131 133
132 ctl = io7_get_irq_ctl(irq, &io7); 134 ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
135 __func__, irq); 137 __func__, irq);
136 return; 138 return;
137 } 139 }
138 140
139 spin_lock(&io7->irq_lock); 141 spin_lock(&io7->irq_lock);
140 *ctl &= ~(1UL << 24); 142 *ctl &= ~(1UL << 24);
141 mb(); 143 mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
144} 146}
145 147
146static void 148static void
147marvel_irq_noop(unsigned int irq) 149marvel_irq_noop(struct irq_data *d)
148{ 150{
149 return; 151 return;
150}
151
152static unsigned int
153marvel_irq_noop_return(unsigned int irq)
154{
155 return 0;
156} 152}
157 153
158static struct irq_chip marvel_legacy_irq_type = { 154static struct irq_chip marvel_legacy_irq_type = {
159 .name = "LEGACY", 155 .name = "LEGACY",
160 .mask = marvel_irq_noop, 156 .irq_mask = marvel_irq_noop,
161 .unmask = marvel_irq_noop, 157 .irq_unmask = marvel_irq_noop,
162}; 158};
163 159
164static struct irq_chip io7_lsi_irq_type = { 160static struct irq_chip io7_lsi_irq_type = {
165 .name = "LSI", 161 .name = "LSI",
166 .unmask = io7_enable_irq, 162 .irq_unmask = io7_enable_irq,
167 .mask = io7_disable_irq, 163 .irq_mask = io7_disable_irq,
168 .mask_ack = io7_disable_irq, 164 .irq_mask_ack = io7_disable_irq,
169}; 165};
170 166
171static struct irq_chip io7_msi_irq_type = { 167static struct irq_chip io7_msi_irq_type = {
172 .name = "MSI", 168 .name = "MSI",
173 .unmask = io7_enable_irq, 169 .irq_unmask = io7_enable_irq,
174 .mask = io7_disable_irq, 170 .irq_mask = io7_disable_irq,
175 .ack = marvel_irq_noop, 171 .irq_ack = marvel_irq_noop,
176}; 172};
177 173
178static void 174static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
280 276
281 /* Set up the lsi irqs. */ 277 /* Set up the lsi irqs. */
282 for (i = 0; i < 128; ++i) { 278 for (i = 0; i < 128; ++i) {
283 irq_to_desc(base + i)->status |= IRQ_LEVEL;
284 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); 279 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
280 irq_set_status_flags(i, IRQ_LEVEL);
285 } 281 }
286 282
287 /* Disable the implemented irqs in hardware. */ 283 /* Disable the implemented irqs in hardware. */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
294 290
295 /* Set up the msi irqs. */ 291 /* Set up the msi irqs. */
296 for (i = 128; i < (128 + 512); ++i) { 292 for (i = 128; i < (128 + 512); ++i) {
297 irq_to_desc(base + i)->status |= IRQ_LEVEL;
298 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); 293 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
294 irq_set_status_flags(i, IRQ_LEVEL);
299 } 295 }
300 296
301 for (i = 0; i < 16; ++i) 297 for (i = 0; i < 16; ++i)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index bcc1639e8efb..cf7f43dd3147 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
43} 43}
44 44
45static inline void 45static inline void
46mikasa_enable_irq(unsigned int irq) 46mikasa_enable_irq(struct irq_data *d)
47{ 47{
48 mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); 48 mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
49} 49}
50 50
51static void 51static void
52mikasa_disable_irq(unsigned int irq) 52mikasa_disable_irq(struct irq_data *d)
53{ 53{
54 mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); 54 mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
55} 55}
56 56
57static struct irq_chip mikasa_irq_type = { 57static struct irq_chip mikasa_irq_type = {
58 .name = "MIKASA", 58 .name = "MIKASA",
59 .unmask = mikasa_enable_irq, 59 .irq_unmask = mikasa_enable_irq,
60 .mask = mikasa_disable_irq, 60 .irq_mask = mikasa_disable_irq,
61 .mask_ack = mikasa_disable_irq, 61 .irq_mask_ack = mikasa_disable_irq,
62}; 62};
63 63
64static void 64static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
98 mikasa_update_irq_hw(0); 98 mikasa_update_irq_hw(0);
99 99
100 for (i = 16; i < 32; ++i) { 100 for (i = 16; i < 32; ++i) {
101 irq_to_desc(i)->status |= IRQ_LEVEL;
102 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); 101 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
102 irq_set_status_flags(i, IRQ_LEVEL);
103 } 103 }
104 104
105 init_i8259a_irqs(); 105 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index e88f4ae1260e..92bc188e94a9 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
48} 48}
49 49
50static void 50static void
51noritake_enable_irq(unsigned int irq) 51noritake_enable_irq(struct irq_data *d)
52{ 52{
53 noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); 53 noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
54} 54}
55 55
56static void 56static void
57noritake_disable_irq(unsigned int irq) 57noritake_disable_irq(struct irq_data *d)
58{ 58{
59 noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); 59 noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
60} 60}
61 61
62static struct irq_chip noritake_irq_type = { 62static struct irq_chip noritake_irq_type = {
63 .name = "NORITAKE", 63 .name = "NORITAKE",
64 .unmask = noritake_enable_irq, 64 .irq_unmask = noritake_enable_irq,
65 .mask = noritake_disable_irq, 65 .irq_mask = noritake_disable_irq,
66 .mask_ack = noritake_disable_irq, 66 .irq_mask_ack = noritake_disable_irq,
67}; 67};
68 68
69static void 69static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
127 outw(0, 0x54c); 127 outw(0, 0x54c);
128 128
129 for (i = 16; i < 48; ++i) { 129 for (i = 16; i < 48; ++i) {
130 irq_to_desc(i)->status |= IRQ_LEVEL;
131 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); 130 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
131 irq_set_status_flags(i, IRQ_LEVEL);
132 } 132 }
133 133
134 init_i8259a_irqs(); 134 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 6a51364dd1cc..936d4140ed5f 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) 56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
57 57
58static inline void 58static inline void
59rawhide_enable_irq(unsigned int irq) 59rawhide_enable_irq(struct irq_data *d)
60{ 60{
61 unsigned int mask, hose; 61 unsigned int mask, hose;
62 unsigned int irq = d->irq;
62 63
63 irq -= 16; 64 irq -= 16;
64 hose = irq / 24; 65 hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
76} 77}
77 78
78static void 79static void
79rawhide_disable_irq(unsigned int irq) 80rawhide_disable_irq(struct irq_data *d)
80{ 81{
81 unsigned int mask, hose; 82 unsigned int mask, hose;
83 unsigned int irq = d->irq;
82 84
83 irq -= 16; 85 irq -= 16;
84 hose = irq / 24; 86 hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
96} 98}
97 99
98static void 100static void
99rawhide_mask_and_ack_irq(unsigned int irq) 101rawhide_mask_and_ack_irq(struct irq_data *d)
100{ 102{
101 unsigned int mask, mask1, hose; 103 unsigned int mask, mask1, hose;
104 unsigned int irq = d->irq;
102 105
103 irq -= 16; 106 irq -= 16;
104 hose = irq / 24; 107 hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
123 126
124static struct irq_chip rawhide_irq_type = { 127static struct irq_chip rawhide_irq_type = {
125 .name = "RAWHIDE", 128 .name = "RAWHIDE",
126 .unmask = rawhide_enable_irq, 129 .irq_unmask = rawhide_enable_irq,
127 .mask = rawhide_disable_irq, 130 .irq_mask = rawhide_disable_irq,
128 .mask_ack = rawhide_mask_and_ack_irq, 131 .irq_mask_ack = rawhide_mask_and_ack_irq,
129}; 132};
130 133
131static void 134static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
177 } 180 }
178 181
179 for (i = 16; i < 128; ++i) { 182 for (i = 16; i < 128; ++i) {
180 irq_to_desc(i)->status |= IRQ_LEVEL;
181 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); 183 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
184 irq_set_status_flags(i, IRQ_LEVEL);
182 } 185 }
183 186
184 init_i8259a_irqs(); 187 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 89e7e37ec84c..cea22a62913b 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
47} 47}
48 48
49static inline void 49static inline void
50rx164_enable_irq(unsigned int irq) 50rx164_enable_irq(struct irq_data *d)
51{ 51{
52 rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 52 rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
53} 53}
54 54
55static void 55static void
56rx164_disable_irq(unsigned int irq) 56rx164_disable_irq(struct irq_data *d)
57{ 57{
58 rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 58 rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
59} 59}
60 60
61static struct irq_chip rx164_irq_type = { 61static struct irq_chip rx164_irq_type = {
62 .name = "RX164", 62 .name = "RX164",
63 .unmask = rx164_enable_irq, 63 .irq_unmask = rx164_enable_irq,
64 .mask = rx164_disable_irq, 64 .irq_mask = rx164_disable_irq,
65 .mask_ack = rx164_disable_irq, 65 .irq_mask_ack = rx164_disable_irq,
66}; 66};
67 67
68static void 68static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
99 99
100 rx164_update_irq_hw(0); 100 rx164_update_irq_hw(0);
101 for (i = 16; i < 40; ++i) { 101 for (i = 16; i < 40; ++i) {
102 irq_to_desc(i)->status |= IRQ_LEVEL;
103 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); 102 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
103 irq_set_status_flags(i, IRQ_LEVEL);
104 } 104 }
105 105
106 init_i8259a_irqs(); 106 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 5c4423d1b06c..a349538aabc9 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
443/* GENERIC irq routines */ 443/* GENERIC irq routines */
444 444
445static inline void 445static inline void
446sable_lynx_enable_irq(unsigned int irq) 446sable_lynx_enable_irq(struct irq_data *d)
447{ 447{
448 unsigned long bit, mask; 448 unsigned long bit, mask;
449 449
450 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 450 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
451 spin_lock(&sable_lynx_irq_lock); 451 spin_lock(&sable_lynx_irq_lock);
452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); 452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
459} 459}
460 460
461static void 461static void
462sable_lynx_disable_irq(unsigned int irq) 462sable_lynx_disable_irq(struct irq_data *d)
463{ 463{
464 unsigned long bit, mask; 464 unsigned long bit, mask;
465 465
466 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 466 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
467 spin_lock(&sable_lynx_irq_lock); 467 spin_lock(&sable_lynx_irq_lock);
468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
475} 475}
476 476
477static void 477static void
478sable_lynx_mask_and_ack_irq(unsigned int irq) 478sable_lynx_mask_and_ack_irq(struct irq_data *d)
479{ 479{
480 unsigned long bit, mask; 480 unsigned long bit, mask;
481 481
482 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 482 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
483 spin_lock(&sable_lynx_irq_lock); 483 spin_lock(&sable_lynx_irq_lock);
484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
489 489
490static struct irq_chip sable_lynx_irq_type = { 490static struct irq_chip sable_lynx_irq_type = {
491 .name = "SABLE/LYNX", 491 .name = "SABLE/LYNX",
492 .unmask = sable_lynx_enable_irq, 492 .irq_unmask = sable_lynx_enable_irq,
493 .mask = sable_lynx_disable_irq, 493 .irq_mask = sable_lynx_disable_irq,
494 .mask_ack = sable_lynx_mask_and_ack_irq, 494 .irq_mask_ack = sable_lynx_mask_and_ack_irq,
495}; 495};
496 496
497static void 497static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
518 long i; 518 long i;
519 519
520 for (i = 0; i < nr_of_irqs; ++i) { 520 for (i = 0; i < nr_of_irqs; ++i) {
521 irq_to_desc(i)->status |= IRQ_LEVEL;
522 set_irq_chip_and_handler(i, &sable_lynx_irq_type, 521 set_irq_chip_and_handler(i, &sable_lynx_irq_type,
523 handle_level_irq); 522 handle_level_irq);
523 irq_set_status_flags(i, IRQ_LEVEL);
524 } 524 }
525 525
526 common_init_isa_dma(); 526 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index f8a1e8a862fb..42a5331f13c4 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
45} 45}
46 46
47static inline void 47static inline void
48takara_enable_irq(unsigned int irq) 48takara_enable_irq(struct irq_data *d)
49{ 49{
50 unsigned int irq = d->irq;
50 unsigned long mask; 51 unsigned long mask;
51 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 52 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
52 takara_update_irq_hw(irq, mask); 53 takara_update_irq_hw(irq, mask);
53} 54}
54 55
55static void 56static void
56takara_disable_irq(unsigned int irq) 57takara_disable_irq(struct irq_data *d)
57{ 58{
59 unsigned int irq = d->irq;
58 unsigned long mask; 60 unsigned long mask;
59 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 61 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
60 takara_update_irq_hw(irq, mask); 62 takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
62 64
63static struct irq_chip takara_irq_type = { 65static struct irq_chip takara_irq_type = {
64 .name = "TAKARA", 66 .name = "TAKARA",
65 .unmask = takara_enable_irq, 67 .irq_unmask = takara_enable_irq,
66 .mask = takara_disable_irq, 68 .irq_mask = takara_disable_irq,
67 .mask_ack = takara_disable_irq, 69 .irq_mask_ack = takara_disable_irq,
68}; 70};
69 71
70static void 72static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
136 takara_update_irq_hw(i, -1); 138 takara_update_irq_hw(i, -1);
137 139
138 for (i = 16; i < 128; ++i) { 140 for (i = 16; i < 128; ++i) {
139 irq_to_desc(i)->status |= IRQ_LEVEL;
140 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); 141 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
142 irq_set_status_flags(i, IRQ_LEVEL);
141 } 143 }
142 144
143 common_init_isa_dma(); 145 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index e02494bf5ef3..f6c108a3d673 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
112} 112}
113 113
114static inline void 114static inline void
115titan_enable_irq(unsigned int irq) 115titan_enable_irq(struct irq_data *d)
116{ 116{
117 unsigned int irq = d->irq;
117 spin_lock(&titan_irq_lock); 118 spin_lock(&titan_irq_lock);
118 titan_cached_irq_mask |= 1UL << (irq - 16); 119 titan_cached_irq_mask |= 1UL << (irq - 16);
119 titan_update_irq_hw(titan_cached_irq_mask); 120 titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
121} 122}
122 123
123static inline void 124static inline void
124titan_disable_irq(unsigned int irq) 125titan_disable_irq(struct irq_data *d)
125{ 126{
127 unsigned int irq = d->irq;
126 spin_lock(&titan_irq_lock); 128 spin_lock(&titan_irq_lock);
127 titan_cached_irq_mask &= ~(1UL << (irq - 16)); 129 titan_cached_irq_mask &= ~(1UL << (irq - 16));
128 titan_update_irq_hw(titan_cached_irq_mask); 130 titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,7 +146,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
144} 146}
145 147
146static int 148static int
147titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 149titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
150 bool force)
148{ 151{
149 spin_lock(&titan_irq_lock); 152 spin_lock(&titan_irq_lock);
150 titan_cpu_set_irq_affinity(irq - 16, *affinity); 153 titan_cpu_set_irq_affinity(irq - 16, *affinity);
@@ -175,17 +178,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
175{ 178{
176 long i; 179 long i;
177 for (i = imin; i <= imax; ++i) { 180 for (i = imin; i <= imax; ++i) {
178 irq_to_desc(i)->status |= IRQ_LEVEL;
179 set_irq_chip_and_handler(i, ops, handle_level_irq); 181 set_irq_chip_and_handler(i, ops, handle_level_irq);
182 irq_set_status_flags(i, IRQ_LEVEL);
180 } 183 }
181} 184}
182 185
183static struct irq_chip titan_irq_type = { 186static struct irq_chip titan_irq_type = {
184 .name = "TITAN", 187 .name = "TITAN",
185 .unmask = titan_enable_irq, 188 .irq_unmask = titan_enable_irq,
186 .mask = titan_disable_irq, 189 .irq_mask = titan_disable_irq,
187 .mask_ack = titan_disable_irq, 190 .irq_mask_ack = titan_disable_irq,
188 .set_affinity = titan_set_irq_affinity, 191 .irq_set_affinity = titan_set_irq_affinity,
189}; 192};
190 193
191static irqreturn_t 194static irqreturn_t
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index eec52594d410..ca60a387ef0a 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
104} 104}
105 105
106static void 106static void
107wildfire_enable_irq(unsigned int irq) 107wildfire_enable_irq(struct irq_data *d)
108{ 108{
109 unsigned int irq = d->irq;
110
109 if (irq < 16) 111 if (irq < 16)
110 i8259a_enable_irq(irq); 112 i8259a_enable_irq(d);
111 113
112 spin_lock(&wildfire_irq_lock); 114 spin_lock(&wildfire_irq_lock);
113 set_bit(irq, &cached_irq_mask); 115 set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
116} 118}
117 119
118static void 120static void
119wildfire_disable_irq(unsigned int irq) 121wildfire_disable_irq(struct irq_data *d)
120{ 122{
123 unsigned int irq = d->irq;
124
121 if (irq < 16) 125 if (irq < 16)
122 i8259a_disable_irq(irq); 126 i8259a_disable_irq(d);
123 127
124 spin_lock(&wildfire_irq_lock); 128 spin_lock(&wildfire_irq_lock);
125 clear_bit(irq, &cached_irq_mask); 129 clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
128} 132}
129 133
130static void 134static void
131wildfire_mask_and_ack_irq(unsigned int irq) 135wildfire_mask_and_ack_irq(struct irq_data *d)
132{ 136{
137 unsigned int irq = d->irq;
138
133 if (irq < 16) 139 if (irq < 16)
134 i8259a_mask_and_ack_irq(irq); 140 i8259a_mask_and_ack_irq(d);
135 141
136 spin_lock(&wildfire_irq_lock); 142 spin_lock(&wildfire_irq_lock);
137 clear_bit(irq, &cached_irq_mask); 143 clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
141 147
142static struct irq_chip wildfire_irq_type = { 148static struct irq_chip wildfire_irq_type = {
143 .name = "WILDFIRE", 149 .name = "WILDFIRE",
144 .unmask = wildfire_enable_irq, 150 .irq_unmask = wildfire_enable_irq,
145 .mask = wildfire_disable_irq, 151 .irq_mask = wildfire_disable_irq,
146 .mask_ack = wildfire_mask_and_ack_irq, 152 .irq_mask_ack = wildfire_mask_and_ack_irq,
147}; 153};
148 154
149static void __init 155static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
177 for (i = 0; i < 16; ++i) { 183 for (i = 0; i < 16; ++i) {
178 if (i == 2) 184 if (i == 2)
179 continue; 185 continue;
180 irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
181 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 186 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
182 handle_level_irq); 187 handle_level_irq);
188 irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
183 } 189 }
184 190
185 irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
186 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, 191 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
187 handle_level_irq); 192 handle_level_irq);
193 irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
188 for (i = 40; i < 64; ++i) { 194 for (i = 40; i < 64; ++i) {
189 irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
190 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 195 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
191 handle_level_irq); 196 handle_level_irq);
197 irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
192 } 198 }
193 199
194 setup_irq(32+irq_bias, &isa_enable); 200 setup_irq(32+irq_bias, &isa_enable);
195} 201}
196 202
197static void __init 203static void __init
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 26d45e5b636b..166efa2a19cd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622
1177 visible impact on the overall performance or power consumption of the 1177 visible impact on the overall performance or power consumption of the
1178 processor. 1178 processor.
1179 1179
1180config ARM_ERRATA_751472
1181 bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
1182 depends on CPU_V7 && SMP
1183 help
1184 This option enables the workaround for the 751472 Cortex-A9 (prior
1185 to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
1186 completion of a following broadcasted operation if the second
1187 operation is received by a CPU before the ICIALLUIS has completed,
1188 potentially leading to corrupted entries in the cache or TLB.
1189
1190config ARM_ERRATA_753970
1191 bool "ARM errata: cache sync operation may be faulty"
1192 depends on CACHE_PL310
1193 help
1194 This option enables the workaround for the 753970 PL310 (r3p0) erratum.
1195
1196 Under some condition the effect of cache sync operation on
1197 the store buffer still remains when the operation completes.
1198 This means that the store buffer is always asked to drain and
1199 this prevents it from merging any further writes. The workaround
1200 is to replace the normal offset of cache sync operation (0x730)
1201 by another offset targeting an unmapped PL310 register 0x740.
1202 This has the same effect as the cache sync operation: store buffer
1203 drain and waiting for all buffers empty.
1204
1180endmenu 1205endmenu
1181 1206
1182source "arch/arm/common/Kconfig" 1207source "arch/arm/common/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c22c1adfedd6..6f7b29294c80 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
15LDFLAGS_vmlinux += --be8 15LDFLAGS_vmlinux += --be8
16endif 16endif
17 17
18OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S 18OBJCOPYFLAGS :=-O binary -R .comment -S
19GZFLAGS :=-9 19GZFLAGS :=-9
20#KBUILD_CFLAGS +=-pipe 20#KBUILD_CFLAGS +=-pipe
21# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: 21# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index ab204db594d3..c6028967d336 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,3 +1,7 @@
1font.c 1font.c
2piggy.gz 2lib1funcs.S
3piggy.gzip
4piggy.lzo
5piggy.lzma
6vmlinux
3vmlinux.lds 7vmlinux.lds
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 5aeec1e1735c..16bd48031583 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,6 +36,7 @@
36#define L2X0_RAW_INTR_STAT 0x21C 36#define L2X0_RAW_INTR_STAT 0x21C
37#define L2X0_INTR_CLEAR 0x220 37#define L2X0_INTR_CLEAR 0x220
38#define L2X0_CACHE_SYNC 0x730 38#define L2X0_CACHE_SYNC 0x730
39#define L2X0_DUMMY_REG 0x740
39#define L2X0_INV_LINE_PA 0x770 40#define L2X0_INV_LINE_PA 0x770
40#define L2X0_INV_WAY 0x77C 41#define L2X0_INV_WAY 0x77C
41#define L2X0_CLEAN_LINE_PA 0x7B0 42#define L2X0_CLEAN_LINE_PA 0x7B0
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index 721847dc68ab..e0d1c0cfa548 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -58,6 +58,9 @@
58 58
59static inline void sysctl_soft_reset(void __iomem *base) 59static inline void sysctl_soft_reset(void __iomem *base)
60{ 60{
61 /* switch to slow mode */
62 writel(0x2, base + SCCTRL);
63
61 /* writing any value to SCSYSSTAT reg will reset system */ 64 /* writing any value to SCSYSSTAT reg will reset system */
62 writel(0, base + SCSYSSTAT); 65 writel(0, base + SCSYSSTAT);
63} 66}
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f57cd12..82dfe5d0c41e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,16 +18,34 @@
18#define __ASMARM_TLB_H 18#define __ASMARM_TLB_H
19 19
20#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
21#include <asm/tlbflush.h>
22 21
23#ifndef CONFIG_MMU 22#ifndef CONFIG_MMU
24 23
25#include <linux/pagemap.h> 24#include <linux/pagemap.h>
25
26#define tlb_flush(tlb) ((void) tlb)
27
26#include <asm-generic/tlb.h> 28#include <asm-generic/tlb.h>
27 29
28#else /* !CONFIG_MMU */ 30#else /* !CONFIG_MMU */
29 31
32#include <linux/swap.h>
30#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
34#include <asm/tlbflush.h>
35
36/*
37 * We need to delay page freeing for SMP as other CPUs can access pages
38 * which have been removed but not yet had their TLB entries invalidated.
39 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
40 * we need to apply this same delaying tactic to ensure correct operation.
41 */
42#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
43#define tlb_fast_mode(tlb) 0
44#define FREE_PTE_NR 500
45#else
46#define tlb_fast_mode(tlb) 1
47#define FREE_PTE_NR 0
48#endif
31 49
32/* 50/*
33 * TLB handling. This allows us to remove pages from the page 51 * TLB handling. This allows us to remove pages from the page
@@ -36,12 +54,58 @@
36struct mmu_gather { 54struct mmu_gather {
37 struct mm_struct *mm; 55 struct mm_struct *mm;
38 unsigned int fullmm; 56 unsigned int fullmm;
57 struct vm_area_struct *vma;
39 unsigned long range_start; 58 unsigned long range_start;
40 unsigned long range_end; 59 unsigned long range_end;
60 unsigned int nr;
61 struct page *pages[FREE_PTE_NR];
41}; 62};
42 63
43DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 64DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
44 65
66/*
67 * This is unnecessarily complex. There's three ways the TLB shootdown
68 * code is used:
69 * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
70 * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
71 * tlb->vma will be non-NULL.
72 * 2. Unmapping all vmas. See exit_mmap().
73 * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
74 * tlb->vma will be non-NULL. Additionally, page tables will be freed.
75 * 3. Unmapping argument pages. See shift_arg_pages().
76 * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
77 * tlb->vma will be NULL.
78 */
79static inline void tlb_flush(struct mmu_gather *tlb)
80{
81 if (tlb->fullmm || !tlb->vma)
82 flush_tlb_mm(tlb->mm);
83 else if (tlb->range_end > 0) {
84 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
85 tlb->range_start = TASK_SIZE;
86 tlb->range_end = 0;
87 }
88}
89
90static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
91{
92 if (!tlb->fullmm) {
93 if (addr < tlb->range_start)
94 tlb->range_start = addr;
95 if (addr + PAGE_SIZE > tlb->range_end)
96 tlb->range_end = addr + PAGE_SIZE;
97 }
98}
99
100static inline void tlb_flush_mmu(struct mmu_gather *tlb)
101{
102 tlb_flush(tlb);
103 if (!tlb_fast_mode(tlb)) {
104 free_pages_and_swap_cache(tlb->pages, tlb->nr);
105 tlb->nr = 0;
106 }
107}
108
45static inline struct mmu_gather * 109static inline struct mmu_gather *
46tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 110tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
47{ 111{
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
49 113
50 tlb->mm = mm; 114 tlb->mm = mm;
51 tlb->fullmm = full_mm_flush; 115 tlb->fullmm = full_mm_flush;
116 tlb->vma = NULL;
117 tlb->nr = 0;
52 118
53 return tlb; 119 return tlb;
54} 120}
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
56static inline void 122static inline void
57tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 123tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
58{ 124{
59 if (tlb->fullmm) 125 tlb_flush_mmu(tlb);
60 flush_tlb_mm(tlb->mm);
61 126
62 /* keep the page table cache within bounds */ 127 /* keep the page table cache within bounds */
63 check_pgt_cache(); 128 check_pgt_cache();
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
71static inline void 136static inline void
72tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) 137tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
73{ 138{
74 if (!tlb->fullmm) { 139 tlb_add_flush(tlb, addr);
75 if (addr < tlb->range_start)
76 tlb->range_start = addr;
77 if (addr + PAGE_SIZE > tlb->range_end)
78 tlb->range_end = addr + PAGE_SIZE;
79 }
80} 140}
81 141
82/* 142/*
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
89{ 149{
90 if (!tlb->fullmm) { 150 if (!tlb->fullmm) {
91 flush_cache_range(vma, vma->vm_start, vma->vm_end); 151 flush_cache_range(vma, vma->vm_start, vma->vm_end);
152 tlb->vma = vma;
92 tlb->range_start = TASK_SIZE; 153 tlb->range_start = TASK_SIZE;
93 tlb->range_end = 0; 154 tlb->range_end = 0;
94 } 155 }
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
97static inline void 158static inline void
98tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) 159tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
99{ 160{
100 if (!tlb->fullmm && tlb->range_end > 0) 161 if (!tlb->fullmm)
101 flush_tlb_range(vma, tlb->range_start, tlb->range_end); 162 tlb_flush(tlb);
163}
164
165static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
166{
167 if (tlb_fast_mode(tlb)) {
168 free_page_and_swap_cache(page);
169 } else {
170 tlb->pages[tlb->nr++] = page;
171 if (tlb->nr >= FREE_PTE_NR)
172 tlb_flush_mmu(tlb);
173 }
174}
175
176static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
177 unsigned long addr)
178{
179 pgtable_page_dtor(pte);
180 tlb_add_flush(tlb, addr);
181 tlb_remove_page(tlb, pte);
102} 182}
103 183
104#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) 184#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
105#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
106#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) 185#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
107 186
108#define tlb_migrate_finish(mm) do { } while (0) 187#define tlb_migrate_finish(mm) do { } while (0)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ce7378ea15a2..d2005de383b8 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -10,12 +10,7 @@
10#ifndef _ASMARM_TLBFLUSH_H 10#ifndef _ASMARM_TLBFLUSH_H
11#define _ASMARM_TLBFLUSH_H 11#define _ASMARM_TLBFLUSH_H
12 12
13 13#ifdef CONFIG_MMU
14#ifndef CONFIG_MMU
15
16#define tlb_flush(tlb) ((void) tlb)
17
18#else /* CONFIG_MMU */
19 14
20#include <asm/glue.h> 15#include <asm/glue.h>
21 16
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c4..8f6ed43861f1 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1437 1437
1438 return space_cccc_1100_010x(insn, asi); 1438 return space_cccc_1100_010x(insn, asi);
1439 1439
1440 } else if ((insn & 0x0e000000) == 0x0c400000) { 1440 } else if ((insn & 0x0e000000) == 0x0c000000) {
1441 1441
1442 return space_cccc_110x(insn, asi); 1442 return space_cccc_110x(insn, asi);
1443 1443
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96ea62e6..2c79eec19262 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
97 irq, cpu); 97 irq, cpu);
98 return err; 98 return err;
99#else 99#else
100 return 0; 100 return -EINVAL;
101#endif 101#endif
102} 102}
103 103
104static int 104static int
105init_cpu_pmu(void) 105init_cpu_pmu(void)
106{ 106{
107 int i, err = 0; 107 int i, irqs, err = 0;
108 struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; 108 struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
109 109
110 if (!pdev) { 110 if (!pdev)
111 err = -ENODEV; 111 return -ENODEV;
112 goto out; 112
113 } 113 irqs = pdev->num_resources;
114
115 /*
116 * If we have a single PMU interrupt that we can't shift, assume that
117 * we're running on a uniprocessor machine and continue.
118 */
119 if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
120 return 0;
114 121
115 for (i = 0; i < pdev->num_resources; ++i) { 122 for (i = 0; i < irqs; ++i) {
116 err = set_irq_affinity(platform_get_irq(pdev, i), i); 123 err = set_irq_affinity(platform_get_irq(pdev, i), i);
117 if (err) 124 if (err)
118 break; 125 break;
119 } 126 }
120 127
121out:
122 return err; 128 return err;
123} 129}
124 130
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6485d6..5ea4fb718b97 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
226 * Register 0 and check for VMSAv7 or PMSAv7 */ 226 * Register 0 and check for VMSAv7 or PMSAv7 */
227 asm("mrc p15, 0, %0, c0, c1, 4" 227 asm("mrc p15, 0, %0, c0, c1, 4"
228 : "=r" (mmfr0)); 228 : "=r" (mmfr0));
229 if ((mmfr0 & 0x0000000f) == 0x00000003 || 229 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
230 (mmfr0 & 0x000000f0) == 0x00000030) 230 (mmfr0 & 0x000000f0) >= 0x00000030)
231 cpu_arch = CPU_ARCH_ARMv7; 231 cpu_arch = CPU_ARCH_ARMv7;
232 else if ((mmfr0 & 0x0000000f) == 0x00000002 || 232 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
233 (mmfr0 & 0x000000f0) == 0x00000020) 233 (mmfr0 & 0x000000f0) == 0x00000020)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a620bca..abaf8445ce25 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
474 unsigned long handler = (unsigned long)ka->sa.sa_handler; 474 unsigned long handler = (unsigned long)ka->sa.sa_handler;
475 unsigned long retcode; 475 unsigned long retcode;
476 int thumb = 0; 476 int thumb = 0;
477 unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; 477 unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
478
479 cpsr |= PSR_ENDSTATE;
478 480
479 /* 481 /*
480 * Maybe we need to deliver a 32-bit signal to a 26-bit task. 482 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f2031..61462790757f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
21#define ARM_CPU_KEEP(x) 21#define ARM_CPU_KEEP(x)
22#endif 22#endif
23 23
24#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
25#define ARM_EXIT_KEEP(x) x
26#else
27#define ARM_EXIT_KEEP(x)
28#endif
29
24OUTPUT_ARCH(arm) 30OUTPUT_ARCH(arm)
25ENTRY(stext) 31ENTRY(stext)
26 32
@@ -43,6 +49,7 @@ SECTIONS
43 _sinittext = .; 49 _sinittext = .;
44 HEAD_TEXT 50 HEAD_TEXT
45 INIT_TEXT 51 INIT_TEXT
52 ARM_EXIT_KEEP(EXIT_TEXT)
46 _einittext = .; 53 _einittext = .;
47 ARM_CPU_DISCARD(PROC_INFO) 54 ARM_CPU_DISCARD(PROC_INFO)
48 __arch_info_begin = .; 55 __arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
67#ifndef CONFIG_XIP_KERNEL 74#ifndef CONFIG_XIP_KERNEL
68 __init_begin = _stext; 75 __init_begin = _stext;
69 INIT_DATA 76 INIT_DATA
77 ARM_EXIT_KEEP(EXIT_DATA)
70#endif 78#endif
71 } 79 }
72 80
@@ -162,6 +170,7 @@ SECTIONS
162 . = ALIGN(PAGE_SIZE); 170 . = ALIGN(PAGE_SIZE);
163 __init_begin = .; 171 __init_begin = .;
164 INIT_DATA 172 INIT_DATA
173 ARM_EXIT_KEEP(EXIT_DATA)
165 . = ALIGN(PAGE_SIZE); 174 . = ALIGN(PAGE_SIZE);
166 __init_end = .; 175 __init_end = .;
167#endif 176#endif
@@ -247,6 +256,8 @@ SECTIONS
247 } 256 }
248#endif 257#endif
249 258
259 NOTES
260
250 BSS_SECTION(0, 0, 0) 261 BSS_SECTION(0, 0, 0)
251 _end = .; 262 _end = .;
252 263
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 337392c3f549..acb7ae5b0a25 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
77 dd = clk->dpll_data; 77 dd = clk->dpll_data;
78 78
79 /* DPLL divider must result in a valid jitter correction val */ 79 /* DPLL divider must result in a valid jitter correction val */
80 fint = clk->parent->rate / (n + 1); 80 fint = clk->parent->rate / n;
81 if (fint < DPLL_FINT_BAND1_MIN) { 81 if (fint < DPLL_FINT_BAND1_MIN) {
82 82
83 pr_debug("rejecting n=%d due to Fint failure, " 83 pr_debug("rejecting n=%d due to Fint failure, "
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 394413dc7deb..0a585dfa9874 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -334,7 +334,7 @@ static struct omap_mbox mbox_iva_info = {
334 .priv = &omap2_mbox_iva_priv, 334 .priv = &omap2_mbox_iva_priv,
335}; 335};
336 336
337struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL }; 337struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
338#endif 338#endif
339 339
340#if defined(CONFIG_ARCH_OMAP4) 340#if defined(CONFIG_ARCH_OMAP4)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 98148b6c36e9..6c84659cf846 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
605 list_for_each_entry(e, &partition->muxmodes, node) { 605 list_for_each_entry(e, &partition->muxmodes, node) {
606 struct omap_mux *m = &e->mux; 606 struct omap_mux *m = &e->mux;
607 607
608 (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir, 608 (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
609 m, &omap_mux_dbg_signal_fops); 609 m, &omap_mux_dbg_signal_fops);
610 } 610 }
611} 611}
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 125f56591fb5..a5a83b358ddd 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
637 637
638 } 638 }
639 639
640 (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d, 640 (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
641 &enable_off_mode, &pm_dbg_option_fops); 641 &enable_off_mode, &pm_dbg_option_fops);
642 (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d, 642 (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
643 &sleep_while_idle, &pm_dbg_option_fops); 643 &sleep_while_idle, &pm_dbg_option_fops);
644 (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d, 644 (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
645 &wakeup_timer_seconds, &pm_dbg_option_fops); 645 &wakeup_timer_seconds, &pm_dbg_option_fops);
646 (void) debugfs_create_file("wakeup_timer_milliseconds", 646 (void) debugfs_create_file("wakeup_timer_milliseconds",
647 S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds, 647 S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
648 &pm_dbg_option_fops); 648 &pm_dbg_option_fops);
649 pm_dbg_init_done = 1; 649 pm_dbg_init_done = 1;
650 650
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
index 729a644ce852..3300ff6e3cfe 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -38,8 +38,8 @@
38#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800 38#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800
39 39
40/* PRCM_MPU clockdomain register offsets (from instance start) */ 40/* PRCM_MPU clockdomain register offsets (from instance start) */
41#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000 41#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018
42#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000 42#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018
43 43
44 44
45/* 45/*
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index c37e823266d3..95ac336fe3f7 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -900,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
900 return PTR_ERR(dbg_dir); 900 return PTR_ERR(dbg_dir);
901 } 901 }
902 902
903 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir, 903 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
904 (void *)sr_info, &pm_sr_fops); 904 (void *)sr_info, &pm_sr_fops);
905 (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, 905 (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
906 &sr_info->err_weight); 906 &sr_info->err_weight);
@@ -939,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
939 strcpy(name, "volt_"); 939 strcpy(name, "volt_");
940 sprintf(volt_name, "%d", volt_data[i].volt_nominal); 940 sprintf(volt_name, "%d", volt_data[i].volt_nominal);
941 strcat(name, volt_name); 941 strcat(name, volt_name);
942 (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir, 942 (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
943 &(sr_info->nvalue_table[i].nvalue)); 943 &(sr_info->nvalue_table[i].nvalue));
944 } 944 }
945 945
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 7b7c2683ae7b..0fc550e7e482 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -39,6 +39,7 @@
39#include <asm/mach/time.h> 39#include <asm/mach/time.h>
40#include <plat/dmtimer.h> 40#include <plat/dmtimer.h>
41#include <asm/localtimer.h> 41#include <asm/localtimer.h>
42#include <asm/sched_clock.h>
42 43
43#include "timer-gp.h" 44#include "timer-gp.h"
44 45
@@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void)
190/* 191/*
191 * clocksource 192 * clocksource
192 */ 193 */
194static DEFINE_CLOCK_DATA(cd);
193static struct omap_dm_timer *gpt_clocksource; 195static struct omap_dm_timer *gpt_clocksource;
194static cycle_t clocksource_read_cycles(struct clocksource *cs) 196static cycle_t clocksource_read_cycles(struct clocksource *cs)
195{ 197{
@@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = {
204 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 206 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
205}; 207};
206 208
209static void notrace dmtimer_update_sched_clock(void)
210{
211 u32 cyc;
212
213 cyc = omap_dm_timer_read_counter(gpt_clocksource);
214
215 update_sched_clock(&cd, cyc, (u32)~0);
216}
217
207/* Setup free-running counter for clocksource */ 218/* Setup free-running counter for clocksource */
208static void __init omap2_gp_clocksource_init(void) 219static void __init omap2_gp_clocksource_init(void)
209{ 220{
@@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
224 235
225 omap_dm_timer_set_load_start(gpt, 1, 0); 236 omap_dm_timer_set_load_start(gpt, 1, 0);
226 237
238 init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
239
227 if (clocksource_register_hz(&clocksource_gpt, tick_rate)) 240 if (clocksource_register_hz(&clocksource_gpt, tick_rate))
228 printk(err2, clocksource_gpt.name); 241 printk(err2, clocksource_gpt.name);
229} 242}
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
index 203dd5a18bd5..058dab4482a1 100644
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/arch/arm/mach-s5p6442/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/map.h 1/* linux/arch/arm/mach-s5p6442/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 4 * http://www.samsung.com/
5 * 5 *
6 * S5P6442 - Memory map definitions 6 * S5P6442 - Memory map definitions
@@ -16,56 +16,61 @@
16#include <plat/map-base.h> 16#include <plat/map-base.h>
17#include <plat/map-s5p.h> 17#include <plat/map-s5p.h>
18 18
19#define S5P6442_PA_CHIPID (0xE0000000) 19#define S5P6442_PA_SDRAM 0x20000000
20#define S5P_PA_CHIPID S5P6442_PA_CHIPID
21 20
22#define S5P6442_PA_SYSCON (0xE0100000) 21#define S5P6442_PA_I2S0 0xC0B00000
23#define S5P_PA_SYSCON S5P6442_PA_SYSCON 22#define S5P6442_PA_I2S1 0xF2200000
24 23
25#define S5P6442_PA_GPIO (0xE0200000) 24#define S5P6442_PA_CHIPID 0xE0000000
26 25
27#define S5P6442_PA_VIC0 (0xE4000000) 26#define S5P6442_PA_SYSCON 0xE0100000
28#define S5P6442_PA_VIC1 (0xE4100000)
29#define S5P6442_PA_VIC2 (0xE4200000)
30 27
31#define S5P6442_PA_SROMC (0xE7000000) 28#define S5P6442_PA_GPIO 0xE0200000
32#define S5P_PA_SROMC S5P6442_PA_SROMC
33 29
34#define S5P6442_PA_MDMA 0xE8000000 30#define S5P6442_PA_VIC0 0xE4000000
35#define S5P6442_PA_PDMA 0xE9000000 31#define S5P6442_PA_VIC1 0xE4100000
32#define S5P6442_PA_VIC2 0xE4200000
36 33
37#define S5P6442_PA_TIMER (0xEA000000) 34#define S5P6442_PA_SROMC 0xE7000000
38#define S5P_PA_TIMER S5P6442_PA_TIMER
39 35
40#define S5P6442_PA_SYSTIMER (0xEA100000) 36#define S5P6442_PA_MDMA 0xE8000000
37#define S5P6442_PA_PDMA 0xE9000000
41 38
42#define S5P6442_PA_WATCHDOG (0xEA200000) 39#define S5P6442_PA_TIMER 0xEA000000
43 40
44#define S5P6442_PA_UART (0xEC000000) 41#define S5P6442_PA_SYSTIMER 0xEA100000
45 42
46#define S5P_PA_UART0 (S5P6442_PA_UART + 0x0) 43#define S5P6442_PA_WATCHDOG 0xEA200000
47#define S5P_PA_UART1 (S5P6442_PA_UART + 0x400)
48#define S5P_PA_UART2 (S5P6442_PA_UART + 0x800)
49#define S5P_SZ_UART SZ_256
50 44
51#define S5P6442_PA_IIC0 (0xEC100000) 45#define S5P6442_PA_UART 0xEC000000
52 46
53#define S5P6442_PA_SDRAM (0x20000000) 47#define S5P6442_PA_IIC0 0xEC100000
54#define S5P_PA_SDRAM S5P6442_PA_SDRAM
55 48
56#define S5P6442_PA_SPI 0xEC300000 49#define S5P6442_PA_SPI 0xEC300000
57 50
58/* I2S */
59#define S5P6442_PA_I2S0 0xC0B00000
60#define S5P6442_PA_I2S1 0xF2200000
61
62/* PCM */
63#define S5P6442_PA_PCM0 0xF2400000 51#define S5P6442_PA_PCM0 0xF2400000
64#define S5P6442_PA_PCM1 0xF2500000 52#define S5P6442_PA_PCM1 0xF2500000
65 53
66/* compatibiltiy defines. */ 54/* Compatibiltiy Defines */
55
56#define S3C_PA_IIC S5P6442_PA_IIC0
67#define S3C_PA_WDT S5P6442_PA_WATCHDOG 57#define S3C_PA_WDT S5P6442_PA_WATCHDOG
58
59#define S5P_PA_CHIPID S5P6442_PA_CHIPID
60#define S5P_PA_SDRAM S5P6442_PA_SDRAM
61#define S5P_PA_SROMC S5P6442_PA_SROMC
62#define S5P_PA_SYSCON S5P6442_PA_SYSCON
63#define S5P_PA_TIMER S5P6442_PA_TIMER
64
65/* UART */
66
68#define S3C_PA_UART S5P6442_PA_UART 67#define S3C_PA_UART S5P6442_PA_UART
69#define S3C_PA_IIC S5P6442_PA_IIC0 68
69#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
70#define S5P_PA_UART0 S5P_PA_UART(0)
71#define S5P_PA_UART1 S5P_PA_UART(1)
72#define S5P_PA_UART2 S5P_PA_UART(2)
73
74#define S5P_SZ_UART SZ_256
70 75
71#endif /* __ASM_ARCH_MAP_H */ 76#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h
index a9365e5ba614..95c91257c7ca 100644
--- a/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5p64x0/include/mach/map.h 1/* linux/arch/arm/mach-s5p64x0/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 4 * http://www.samsung.com
5 * 5 *
6 * S5P64X0 - Memory map definitions 6 * S5P64X0 - Memory map definitions
@@ -16,64 +16,46 @@
16#include <plat/map-base.h> 16#include <plat/map-base.h>
17#include <plat/map-s5p.h> 17#include <plat/map-s5p.h>
18 18
19#define S5P64X0_PA_SDRAM (0x20000000) 19#define S5P64X0_PA_SDRAM 0x20000000
20 20
21#define S5P64X0_PA_CHIPID (0xE0000000) 21#define S5P64X0_PA_CHIPID 0xE0000000
22#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
23
24#define S5P64X0_PA_SYSCON (0xE0100000)
25#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
26
27#define S5P64X0_PA_GPIO (0xE0308000)
28
29#define S5P64X0_PA_VIC0 (0xE4000000)
30#define S5P64X0_PA_VIC1 (0xE4100000)
31 22
32#define S5P64X0_PA_SROMC (0xE7000000) 23#define S5P64X0_PA_SYSCON 0xE0100000
33#define S5P_PA_SROMC S5P64X0_PA_SROMC
34
35#define S5P64X0_PA_PDMA (0xE9000000)
36
37#define S5P64X0_PA_TIMER (0xEA000000)
38#define S5P_PA_TIMER S5P64X0_PA_TIMER
39 24
40#define S5P64X0_PA_RTC (0xEA100000) 25#define S5P64X0_PA_GPIO 0xE0308000
41 26
42#define S5P64X0_PA_WDT (0xEA200000) 27#define S5P64X0_PA_VIC0 0xE4000000
28#define S5P64X0_PA_VIC1 0xE4100000
43 29
44#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) 30#define S5P64X0_PA_SROMC 0xE7000000
45#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
46 31
47#define S5P_PA_UART0 S5P6450_PA_UART(0) 32#define S5P64X0_PA_PDMA 0xE9000000
48#define S5P_PA_UART1 S5P6450_PA_UART(1)
49#define S5P_PA_UART2 S5P6450_PA_UART(2)
50#define S5P_PA_UART3 S5P6450_PA_UART(3)
51#define S5P_PA_UART4 S5P6450_PA_UART(4)
52#define S5P_PA_UART5 S5P6450_PA_UART(5)
53 33
54#define S5P_SZ_UART SZ_256 34#define S5P64X0_PA_TIMER 0xEA000000
35#define S5P64X0_PA_RTC 0xEA100000
36#define S5P64X0_PA_WDT 0xEA200000
55 37
56#define S5P6440_PA_IIC0 (0xEC104000) 38#define S5P6440_PA_IIC0 0xEC104000
57#define S5P6440_PA_IIC1 (0xEC20F000) 39#define S5P6440_PA_IIC1 0xEC20F000
58#define S5P6450_PA_IIC0 (0xEC100000) 40#define S5P6450_PA_IIC0 0xEC100000
59#define S5P6450_PA_IIC1 (0xEC200000) 41#define S5P6450_PA_IIC1 0xEC200000
60 42
61#define S5P64X0_PA_SPI0 (0xEC400000) 43#define S5P64X0_PA_SPI0 0xEC400000
62#define S5P64X0_PA_SPI1 (0xEC500000) 44#define S5P64X0_PA_SPI1 0xEC500000
63 45
64#define S5P64X0_PA_HSOTG (0xED100000) 46#define S5P64X0_PA_HSOTG 0xED100000
65 47
66#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) 48#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
67 49
68#define S5P64X0_PA_I2S (0xF2000000) 50#define S5P64X0_PA_I2S 0xF2000000
69#define S5P6450_PA_I2S1 0xF2800000 51#define S5P6450_PA_I2S1 0xF2800000
70#define S5P6450_PA_I2S2 0xF2900000 52#define S5P6450_PA_I2S2 0xF2900000
71 53
72#define S5P64X0_PA_PCM (0xF2100000) 54#define S5P64X0_PA_PCM 0xF2100000
73 55
74#define S5P64X0_PA_ADC (0xF3000000) 56#define S5P64X0_PA_ADC 0xF3000000
75 57
76/* compatibiltiy defines. */ 58/* Compatibiltiy Defines */
77 59
78#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0) 60#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0)
79#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1) 61#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1)
@@ -83,6 +65,25 @@
83#define S3C_PA_RTC S5P64X0_PA_RTC 65#define S3C_PA_RTC S5P64X0_PA_RTC
84#define S3C_PA_WDT S5P64X0_PA_WDT 66#define S3C_PA_WDT S5P64X0_PA_WDT
85 67
68#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
69#define S5P_PA_SROMC S5P64X0_PA_SROMC
70#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
71#define S5P_PA_TIMER S5P64X0_PA_TIMER
72
86#define SAMSUNG_PA_ADC S5P64X0_PA_ADC 73#define SAMSUNG_PA_ADC S5P64X0_PA_ADC
87 74
75/* UART */
76
77#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET))
78#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
79
80#define S5P_PA_UART0 S5P6450_PA_UART(0)
81#define S5P_PA_UART1 S5P6450_PA_UART(1)
82#define S5P_PA_UART2 S5P6450_PA_UART(2)
83#define S5P_PA_UART3 S5P6450_PA_UART(3)
84#define S5P_PA_UART4 S5P6450_PA_UART(4)
85#define S5P_PA_UART5 S5P6450_PA_UART(5)
86
87#define S5P_SZ_UART SZ_256
88
88#endif /* __ASM_ARCH_MAP_H */ 89#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 328467b346aa..ccbe6b767f7d 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -1,5 +1,8 @@
1/* linux/arch/arm/mach-s5pc100/include/mach/map.h 1/* linux/arch/arm/mach-s5pc100/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
3 * Copyright 2009 Samsung Electronics Co. 6 * Copyright 2009 Samsung Electronics Co.
4 * Byungho Min <bhmin@samsung.com> 7 * Byungho Min <bhmin@samsung.com>
5 * 8 *
@@ -16,145 +19,115 @@
16#include <plat/map-base.h> 19#include <plat/map-base.h>
17#include <plat/map-s5p.h> 20#include <plat/map-s5p.h>
18 21
19/* 22#define S5PC100_PA_SDRAM 0x20000000
20 * map-base.h has already defined virtual memory address 23
21 * S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s) 24#define S5PC100_PA_ONENAND 0xE7100000
22 * S3C_VA_SYS S3C_ADDR(0x00100000) system control 25#define S5PC100_PA_ONENAND_BUF 0xB0000000
23 * S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used) 26
24 * S3C_VA_TIMER S3C_ADDR(0x00300000) timer block 27#define S5PC100_PA_CHIPID 0xE0000000
25 * S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog
26 * S3C_VA_UART S3C_ADDR(0x01000000) UART
27 *
28 * S5PC100 specific virtual memory address can be defined here
29 * S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO
30 *
31 */
32 28
33#define S5PC100_PA_ONENAND_BUF (0xB0000000) 29#define S5PC100_PA_SYSCON 0xE0100000
34#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
35 30
36/* Chip ID */ 31#define S5PC100_PA_OTHERS 0xE0200000
37 32
38#define S5PC100_PA_CHIPID (0xE0000000) 33#define S5PC100_PA_GPIO 0xE0300000
39#define S5P_PA_CHIPID S5PC100_PA_CHIPID
40 34
41#define S5PC100_PA_SYSCON (0xE0100000) 35#define S5PC100_PA_VIC0 0xE4000000
42#define S5P_PA_SYSCON S5PC100_PA_SYSCON 36#define S5PC100_PA_VIC1 0xE4100000
37#define S5PC100_PA_VIC2 0xE4200000
43 38
44#define S5PC100_PA_OTHERS (0xE0200000) 39#define S5PC100_PA_SROMC 0xE7000000
45#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
46 40
47#define S5PC100_PA_GPIO (0xE0300000) 41#define S5PC100_PA_CFCON 0xE7800000
48#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000)
49 42
50/* Interrupt */ 43#define S5PC100_PA_MDMA 0xE8100000
51#define S5PC100_PA_VIC0 (0xE4000000) 44#define S5PC100_PA_PDMA0 0xE9000000
52#define S5PC100_PA_VIC1 (0xE4100000) 45#define S5PC100_PA_PDMA1 0xE9200000
53#define S5PC100_PA_VIC2 (0xE4200000)
54#define S5PC100_VA_VIC S3C_VA_IRQ
55#define S5PC100_VA_VIC_OFFSET 0x10000
56#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
57 46
58#define S5PC100_PA_SROMC (0xE7000000) 47#define S5PC100_PA_TIMER 0xEA000000
59#define S5P_PA_SROMC S5PC100_PA_SROMC 48#define S5PC100_PA_SYSTIMER 0xEA100000
49#define S5PC100_PA_WATCHDOG 0xEA200000
50#define S5PC100_PA_RTC 0xEA300000
60 51
61#define S5PC100_PA_ONENAND (0xE7100000) 52#define S5PC100_PA_UART 0xEC000000
62 53
63#define S5PC100_PA_CFCON (0xE7800000) 54#define S5PC100_PA_IIC0 0xEC100000
55#define S5PC100_PA_IIC1 0xEC200000
64 56
65/* DMA */ 57#define S5PC100_PA_SPI0 0xEC300000
66#define S5PC100_PA_MDMA (0xE8100000) 58#define S5PC100_PA_SPI1 0xEC400000
67#define S5PC100_PA_PDMA0 (0xE9000000) 59#define S5PC100_PA_SPI2 0xEC500000
68#define S5PC100_PA_PDMA1 (0xE9200000)
69 60
70/* Timer */ 61#define S5PC100_PA_USB_HSOTG 0xED200000
71#define S5PC100_PA_TIMER (0xEA000000) 62#define S5PC100_PA_USB_HSPHY 0xED300000
72#define S5P_PA_TIMER S5PC100_PA_TIMER
73 63
74#define S5PC100_PA_SYSTIMER (0xEA100000) 64#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
75 65
76#define S5PC100_PA_WATCHDOG (0xEA200000) 66#define S5PC100_PA_FB 0xEE000000
77#define S5PC100_PA_RTC (0xEA300000)
78 67
79#define S5PC100_PA_UART (0xEC000000) 68#define S5PC100_PA_FIMC0 0xEE200000
69#define S5PC100_PA_FIMC1 0xEE300000
70#define S5PC100_PA_FIMC2 0xEE400000
80 71
81#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0) 72#define S5PC100_PA_I2S0 0xF2000000
82#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400) 73#define S5PC100_PA_I2S1 0xF2100000
83#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800) 74#define S5PC100_PA_I2S2 0xF2200000
84#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00)
85#define S5P_SZ_UART SZ_256
86 75
87#define S5PC100_PA_IIC0 (0xEC100000) 76#define S5PC100_PA_AC97 0xF2300000
88#define S5PC100_PA_IIC1 (0xEC200000)
89 77
90/* SPI */ 78#define S5PC100_PA_PCM0 0xF2400000
91#define S5PC100_PA_SPI0 0xEC300000 79#define S5PC100_PA_PCM1 0xF2500000
92#define S5PC100_PA_SPI1 0xEC400000
93#define S5PC100_PA_SPI2 0xEC500000
94 80
95/* USB HS OTG */ 81#define S5PC100_PA_SPDIF 0xF2600000
96#define S5PC100_PA_USB_HSOTG (0xED200000)
97#define S5PC100_PA_USB_HSPHY (0xED300000)
98 82
99#define S5PC100_PA_FB (0xEE000000) 83#define S5PC100_PA_TSADC 0xF3000000
100 84
101#define S5PC100_PA_FIMC0 (0xEE200000) 85#define S5PC100_PA_KEYPAD 0xF3100000
102#define S5PC100_PA_FIMC1 (0xEE300000)
103#define S5PC100_PA_FIMC2 (0xEE400000)
104 86
105#define S5PC100_PA_I2S0 (0xF2000000) 87/* Compatibiltiy Defines */
106#define S5PC100_PA_I2S1 (0xF2100000)
107#define S5PC100_PA_I2S2 (0xF2200000)
108 88
109#define S5PC100_PA_AC97 0xF2300000 89#define S3C_PA_FB S5PC100_PA_FB
90#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
91#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
92#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
93#define S3C_PA_IIC S5PC100_PA_IIC0
94#define S3C_PA_IIC1 S5PC100_PA_IIC1
95#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
96#define S3C_PA_ONENAND S5PC100_PA_ONENAND
97#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
98#define S3C_PA_RTC S5PC100_PA_RTC
99#define S3C_PA_TSADC S5PC100_PA_TSADC
100#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
101#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
102#define S3C_PA_WDT S5PC100_PA_WATCHDOG
110 103
111/* PCM */ 104#define S5P_PA_CHIPID S5PC100_PA_CHIPID
112#define S5PC100_PA_PCM0 0xF2400000 105#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
113#define S5PC100_PA_PCM1 0xF2500000 106#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
107#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
108#define S5P_PA_SDRAM S5PC100_PA_SDRAM
109#define S5P_PA_SROMC S5PC100_PA_SROMC
110#define S5P_PA_SYSCON S5PC100_PA_SYSCON
111#define S5P_PA_TIMER S5PC100_PA_TIMER
114 112
115#define S5PC100_PA_SPDIF 0xF2600000 113#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
114#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
115#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
116 116
117#define S5PC100_PA_TSADC (0xF3000000) 117#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
118 118
119/* KEYPAD */ 119#define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
120#define S5PC100_PA_KEYPAD (0xF3100000)
121 120
122#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) 121/* UART */
123 122
124#define S5PC100_PA_SDRAM (0x20000000) 123#define S3C_PA_UART S5PC100_PA_UART
125#define S5P_PA_SDRAM S5PC100_PA_SDRAM
126 124
127/* compatibiltiy defines. */ 125#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
128#define S3C_PA_UART S5PC100_PA_UART 126#define S5P_PA_UART0 S5P_PA_UART(0)
129#define S3C_PA_IIC S5PC100_PA_IIC0 127#define S5P_PA_UART1 S5P_PA_UART(1)
130#define S3C_PA_IIC1 S5PC100_PA_IIC1 128#define S5P_PA_UART2 S5P_PA_UART(2)
131#define S3C_PA_FB S5PC100_PA_FB 129#define S5P_PA_UART3 S5P_PA_UART(3)
132#define S3C_PA_G2D S5PC100_PA_G2D
133#define S3C_PA_G3D S5PC100_PA_G3D
134#define S3C_PA_JPEG S5PC100_PA_JPEG
135#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR
136#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0)
137#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1)
138#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2)
139#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
140#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
141#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
142#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
143#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
144#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
145#define S3C_PA_WDT S5PC100_PA_WATCHDOG
146#define S3C_PA_TSADC S5PC100_PA_TSADC
147#define S3C_PA_ONENAND S5PC100_PA_ONENAND
148#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
149#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF
150#define S3C_PA_RTC S5PC100_PA_RTC
151
152#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
153#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
154#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
155 130
156#define S5P_PA_FIMC0 S5PC100_PA_FIMC0 131#define S5P_SZ_UART SZ_256
157#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
158#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
159 132
160#endif /* __ASM_ARCH_C100_MAP_H */ 133#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index 3611492ad681..1dd58836fd4f 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5pv210/include/mach/map.h 1/* linux/arch/arm/mach-s5pv210/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 4 * http://www.samsung.com/
5 * 5 *
6 * S5PV210 - Memory map definitions 6 * S5PV210 - Memory map definitions
@@ -16,122 +16,120 @@
16#include <plat/map-base.h> 16#include <plat/map-base.h>
17#include <plat/map-s5p.h> 17#include <plat/map-s5p.h>
18 18
19#define S5PV210_PA_SROM_BANK5 (0xA8000000) 19#define S5PV210_PA_SDRAM 0x20000000
20 20
21#define S5PC110_PA_ONENAND (0xB0000000) 21#define S5PV210_PA_SROM_BANK5 0xA8000000
22#define S5P_PA_ONENAND S5PC110_PA_ONENAND
23 22
24#define S5PC110_PA_ONENAND_DMA (0xB0600000) 23#define S5PC110_PA_ONENAND 0xB0000000
25#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA 24#define S5PC110_PA_ONENAND_DMA 0xB0600000
26 25
27#define S5PV210_PA_CHIPID (0xE0000000) 26#define S5PV210_PA_CHIPID 0xE0000000
28#define S5P_PA_CHIPID S5PV210_PA_CHIPID
29 27
30#define S5PV210_PA_SYSCON (0xE0100000) 28#define S5PV210_PA_SYSCON 0xE0100000
31#define S5P_PA_SYSCON S5PV210_PA_SYSCON
32 29
33#define S5PV210_PA_GPIO (0xE0200000) 30#define S5PV210_PA_GPIO 0xE0200000
34 31
35/* SPI */ 32#define S5PV210_PA_SPDIF 0xE1100000
36#define S5PV210_PA_SPI0 0xE1300000
37#define S5PV210_PA_SPI1 0xE1400000
38 33
39#define S5PV210_PA_KEYPAD (0xE1600000) 34#define S5PV210_PA_SPI0 0xE1300000
35#define S5PV210_PA_SPI1 0xE1400000
40 36
41#define S5PV210_PA_IIC0 (0xE1800000) 37#define S5PV210_PA_KEYPAD 0xE1600000
42#define S5PV210_PA_IIC1 (0xFAB00000)
43#define S5PV210_PA_IIC2 (0xE1A00000)
44 38
45#define S5PV210_PA_TIMER (0xE2500000) 39#define S5PV210_PA_ADC 0xE1700000
46#define S5P_PA_TIMER S5PV210_PA_TIMER
47 40
48#define S5PV210_PA_SYSTIMER (0xE2600000) 41#define S5PV210_PA_IIC0 0xE1800000
42#define S5PV210_PA_IIC1 0xFAB00000
43#define S5PV210_PA_IIC2 0xE1A00000
49 44
50#define S5PV210_PA_WATCHDOG (0xE2700000) 45#define S5PV210_PA_AC97 0xE2200000
51 46
52#define S5PV210_PA_RTC (0xE2800000) 47#define S5PV210_PA_PCM0 0xE2300000
53#define S5PV210_PA_UART (0xE2900000) 48#define S5PV210_PA_PCM1 0xE1200000
49#define S5PV210_PA_PCM2 0xE2B00000
54 50
55#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0) 51#define S5PV210_PA_TIMER 0xE2500000
56#define S5P_PA_UART1 (S5PV210_PA_UART + 0x400) 52#define S5PV210_PA_SYSTIMER 0xE2600000
57#define S5P_PA_UART2 (S5PV210_PA_UART + 0x800) 53#define S5PV210_PA_WATCHDOG 0xE2700000
58#define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00) 54#define S5PV210_PA_RTC 0xE2800000
59 55
60#define S5P_SZ_UART SZ_256 56#define S5PV210_PA_UART 0xE2900000
61 57
62#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) 58#define S5PV210_PA_SROMC 0xE8000000
63 59
64#define S5PV210_PA_SROMC (0xE8000000) 60#define S5PV210_PA_CFCON 0xE8200000
65#define S5P_PA_SROMC S5PV210_PA_SROMC
66 61
67#define S5PV210_PA_CFCON (0xE8200000) 62#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
68 63
69#define S5PV210_PA_MDMA 0xFA200000 64#define S5PV210_PA_HSOTG 0xEC000000
70#define S5PV210_PA_PDMA0 0xE0900000 65#define S5PV210_PA_HSPHY 0xEC100000
71#define S5PV210_PA_PDMA1 0xE0A00000
72 66
73#define S5PV210_PA_FB (0xF8000000) 67#define S5PV210_PA_IIS0 0xEEE30000
68#define S5PV210_PA_IIS1 0xE2100000
69#define S5PV210_PA_IIS2 0xE2A00000
74 70
75#define S5PV210_PA_FIMC0 (0xFB200000) 71#define S5PV210_PA_DMC0 0xF0000000
76#define S5PV210_PA_FIMC1 (0xFB300000) 72#define S5PV210_PA_DMC1 0xF1400000
77#define S5PV210_PA_FIMC2 (0xFB400000)
78 73
79#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) 74#define S5PV210_PA_VIC0 0xF2000000
75#define S5PV210_PA_VIC1 0xF2100000
76#define S5PV210_PA_VIC2 0xF2200000
77#define S5PV210_PA_VIC3 0xF2300000
80 78
81#define S5PV210_PA_HSOTG (0xEC000000) 79#define S5PV210_PA_FB 0xF8000000
82#define S5PV210_PA_HSPHY (0xEC100000)
83 80
84#define S5PV210_PA_VIC0 (0xF2000000) 81#define S5PV210_PA_MDMA 0xFA200000
85#define S5PV210_PA_VIC1 (0xF2100000) 82#define S5PV210_PA_PDMA0 0xE0900000
86#define S5PV210_PA_VIC2 (0xF2200000) 83#define S5PV210_PA_PDMA1 0xE0A00000
87#define S5PV210_PA_VIC3 (0xF2300000)
88 84
89#define S5PV210_PA_SDRAM (0x20000000) 85#define S5PV210_PA_MIPI_CSIS 0xFA600000
90#define S5P_PA_SDRAM S5PV210_PA_SDRAM
91 86
92/* S/PDIF */ 87#define S5PV210_PA_FIMC0 0xFB200000
93#define S5PV210_PA_SPDIF 0xE1100000 88#define S5PV210_PA_FIMC1 0xFB300000
89#define S5PV210_PA_FIMC2 0xFB400000
94 90
95/* I2S */ 91/* Compatibiltiy Defines */
96#define S5PV210_PA_IIS0 0xEEE30000
97#define S5PV210_PA_IIS1 0xE2100000
98#define S5PV210_PA_IIS2 0xE2A00000
99 92
100/* PCM */ 93#define S3C_PA_FB S5PV210_PA_FB
101#define S5PV210_PA_PCM0 0xE2300000 94#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
102#define S5PV210_PA_PCM1 0xE1200000 95#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
103#define S5PV210_PA_PCM2 0xE2B00000 96#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
97#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
98#define S3C_PA_IIC S5PV210_PA_IIC0
99#define S3C_PA_IIC1 S5PV210_PA_IIC1
100#define S3C_PA_IIC2 S5PV210_PA_IIC2
101#define S3C_PA_RTC S5PV210_PA_RTC
102#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
103#define S3C_PA_WDT S5PV210_PA_WATCHDOG
104 104
105/* AC97 */ 105#define S5P_PA_CHIPID S5PV210_PA_CHIPID
106#define S5PV210_PA_AC97 0xE2200000 106#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
107#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
108#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
109#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
110#define S5P_PA_ONENAND S5PC110_PA_ONENAND
111#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
112#define S5P_PA_SDRAM S5PV210_PA_SDRAM
113#define S5P_PA_SROMC S5PV210_PA_SROMC
114#define S5P_PA_SYSCON S5PV210_PA_SYSCON
115#define S5P_PA_TIMER S5PV210_PA_TIMER
107 116
108#define S5PV210_PA_ADC (0xE1700000) 117#define SAMSUNG_PA_ADC S5PV210_PA_ADC
118#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
119#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
109 120
110#define S5PV210_PA_DMC0 (0xF0000000) 121/* UART */
111#define S5PV210_PA_DMC1 (0xF1400000)
112 122
113#define S5PV210_PA_MIPI_CSIS 0xFA600000 123#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
114 124
115/* compatibiltiy defines. */ 125#define S3C_PA_UART S5PV210_PA_UART
116#define S3C_PA_UART S5PV210_PA_UART
117#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
118#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
119#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
120#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
121#define S3C_PA_IIC S5PV210_PA_IIC0
122#define S3C_PA_IIC1 S5PV210_PA_IIC1
123#define S3C_PA_IIC2 S5PV210_PA_IIC2
124#define S3C_PA_FB S5PV210_PA_FB
125#define S3C_PA_RTC S5PV210_PA_RTC
126#define S3C_PA_WDT S5PV210_PA_WATCHDOG
127#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
128#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
129#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
130#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
131#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
132 126
133#define SAMSUNG_PA_ADC S5PV210_PA_ADC 127#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
134#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON 128#define S5P_PA_UART0 S5P_PA_UART(0)
135#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD 129#define S5P_PA_UART1 S5P_PA_UART(1)
130#define S5P_PA_UART2 S5P_PA_UART(2)
131#define S5P_PA_UART3 S5P_PA_UART(3)
132
133#define S5P_SZ_UART SZ_256
136 134
137#endif /* __ASM_ARCH_MAP_H */ 135#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 461aa035afc0..557add4fc56c 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = {
149 149
150static struct regulator_init_data aquila_ldo3_data = { 150static struct regulator_init_data aquila_ldo3_data = {
151 .constraints = { 151 .constraints = {
152 .name = "VUSB/MIPI_1.1V", 152 .name = "VUSB+MIPI_1.1V",
153 .min_uV = 1100000, 153 .min_uV = 1100000,
154 .max_uV = 1100000, 154 .max_uV = 1100000,
155 .apply_uV = 1, 155 .apply_uV = 1,
@@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = {
197 197
198static struct regulator_init_data aquila_ldo8_data = { 198static struct regulator_init_data aquila_ldo8_data = {
199 .constraints = { 199 .constraints = {
200 .name = "VUSB/VADC_3.3V", 200 .name = "VUSB+VADC_3.3V",
201 .min_uV = 3300000, 201 .min_uV = 3300000,
202 .max_uV = 3300000, 202 .max_uV = 3300000,
203 .apply_uV = 1, 203 .apply_uV = 1,
@@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = {
207 207
208static struct regulator_init_data aquila_ldo9_data = { 208static struct regulator_init_data aquila_ldo9_data = {
209 .constraints = { 209 .constraints = {
210 .name = "VCC/VCAM_2.8V", 210 .name = "VCC+VCAM_2.8V",
211 .min_uV = 2800000, 211 .min_uV = 2800000,
212 .max_uV = 2800000, 212 .max_uV = 2800000,
213 .apply_uV = 1, 213 .apply_uV = 1,
@@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = {
381 .buck1_set1 = S5PV210_GPH0(3), 381 .buck1_set1 = S5PV210_GPH0(3),
382 .buck1_set2 = S5PV210_GPH0(4), 382 .buck1_set2 = S5PV210_GPH0(4),
383 .buck2_set3 = S5PV210_GPH0(5), 383 .buck2_set3 = S5PV210_GPH0(5),
384 .buck1_max_voltage1 = 1200000, 384 .buck1_voltage1 = 1200000,
385 .buck1_max_voltage2 = 1200000, 385 .buck1_voltage2 = 1200000,
386 .buck2_max_voltage = 1200000, 386 .buck1_voltage3 = 1200000,
387 .buck1_voltage4 = 1200000,
388 .buck2_voltage1 = 1200000,
389 .buck2_voltage2 = 1200000,
387}; 390};
388#endif 391#endif
389 392
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index e22d5112fd44..056f5c769b0a 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = {
288 288
289static struct regulator_init_data goni_ldo3_data = { 289static struct regulator_init_data goni_ldo3_data = {
290 .constraints = { 290 .constraints = {
291 .name = "VUSB/MIPI_1.1V", 291 .name = "VUSB+MIPI_1.1V",
292 .min_uV = 1100000, 292 .min_uV = 1100000,
293 .max_uV = 1100000, 293 .max_uV = 1100000,
294 .apply_uV = 1, 294 .apply_uV = 1,
@@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = {
337 337
338static struct regulator_init_data goni_ldo8_data = { 338static struct regulator_init_data goni_ldo8_data = {
339 .constraints = { 339 .constraints = {
340 .name = "VUSB/VADC_3.3V", 340 .name = "VUSB+VADC_3.3V",
341 .min_uV = 3300000, 341 .min_uV = 3300000,
342 .max_uV = 3300000, 342 .max_uV = 3300000,
343 .apply_uV = 1, 343 .apply_uV = 1,
@@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = {
347 347
348static struct regulator_init_data goni_ldo9_data = { 348static struct regulator_init_data goni_ldo9_data = {
349 .constraints = { 349 .constraints = {
350 .name = "VCC/VCAM_2.8V", 350 .name = "VCC+VCAM_2.8V",
351 .min_uV = 2800000, 351 .min_uV = 2800000,
352 .max_uV = 2800000, 352 .max_uV = 2800000,
353 .apply_uV = 1, 353 .apply_uV = 1,
@@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = {
521 .buck1_set1 = S5PV210_GPH0(3), 521 .buck1_set1 = S5PV210_GPH0(3),
522 .buck1_set2 = S5PV210_GPH0(4), 522 .buck1_set2 = S5PV210_GPH0(4),
523 .buck2_set3 = S5PV210_GPH0(5), 523 .buck2_set3 = S5PV210_GPH0(5),
524 .buck1_max_voltage1 = 1200000, 524 .buck1_voltage1 = 1200000,
525 .buck1_max_voltage2 = 1200000, 525 .buck1_voltage2 = 1200000,
526 .buck2_max_voltage = 1200000, 526 .buck1_voltage3 = 1200000,
527 .buck1_voltage4 = 1200000,
528 .buck2_voltage1 = 1200000,
529 .buck2_voltage2 = 1200000,
527}; 530};
528#endif 531#endif
529 532
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 3060f78e12ab..901657fa7a12 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5pv310/include/mach/map.h 1/* linux/arch/arm/mach-s5pv310/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 4 * http://www.samsung.com/
5 * 5 *
6 * S5PV310 - Memory map definitions 6 * S5PV310 - Memory map definitions
@@ -23,90 +23,43 @@
23 23
24#include <plat/map-s5p.h> 24#include <plat/map-s5p.h>
25 25
26#define S5PV310_PA_SYSRAM (0x02025000) 26#define S5PV310_PA_SYSRAM 0x02025000
27 27
28#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) 28#define S5PV310_PA_I2S0 0x03830000
29 29#define S5PV310_PA_I2S1 0xE3100000
30#define S5PC210_PA_ONENAND (0x0C000000) 30#define S5PV310_PA_I2S2 0xE2A00000
31#define S5P_PA_ONENAND S5PC210_PA_ONENAND
32
33#define S5PC210_PA_ONENAND_DMA (0x0C600000)
34#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
35
36#define S5PV310_PA_CHIPID (0x10000000)
37#define S5P_PA_CHIPID S5PV310_PA_CHIPID
38
39#define S5PV310_PA_SYSCON (0x10010000)
40#define S5P_PA_SYSCON S5PV310_PA_SYSCON
41 31
42#define S5PV310_PA_PMU (0x10020000) 32#define S5PV310_PA_PCM0 0x03840000
33#define S5PV310_PA_PCM1 0x13980000
34#define S5PV310_PA_PCM2 0x13990000
43 35
44#define S5PV310_PA_CMU (0x10030000) 36#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
45
46#define S5PV310_PA_WATCHDOG (0x10060000)
47#define S5PV310_PA_RTC (0x10070000)
48
49#define S5PV310_PA_DMC0 (0x10400000)
50
51#define S5PV310_PA_COMBINER (0x10448000)
52
53#define S5PV310_PA_COREPERI (0x10500000)
54#define S5PV310_PA_GIC_CPU (0x10500100)
55#define S5PV310_PA_TWD (0x10500600)
56#define S5PV310_PA_GIC_DIST (0x10501000)
57#define S5PV310_PA_L2CC (0x10502000)
58
59/* DMA */
60#define S5PV310_PA_MDMA 0x10810000
61#define S5PV310_PA_PDMA0 0x12680000
62#define S5PV310_PA_PDMA1 0x12690000
63
64#define S5PV310_PA_GPIO1 (0x11400000)
65#define S5PV310_PA_GPIO2 (0x11000000)
66#define S5PV310_PA_GPIO3 (0x03860000)
67
68#define S5PV310_PA_MIPI_CSIS0 0x11880000
69#define S5PV310_PA_MIPI_CSIS1 0x11890000
70 37
71#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) 38#define S5PC210_PA_ONENAND 0x0C000000
39#define S5PC210_PA_ONENAND_DMA 0x0C600000
72 40
73#define S5PV310_PA_SROMC (0x12570000) 41#define S5PV310_PA_CHIPID 0x10000000
74#define S5P_PA_SROMC S5PV310_PA_SROMC
75 42
76/* S/PDIF */ 43#define S5PV310_PA_SYSCON 0x10010000
77#define S5PV310_PA_SPDIF 0xE1100000 44#define S5PV310_PA_PMU 0x10020000
45#define S5PV310_PA_CMU 0x10030000
78 46
79/* I2S */ 47#define S5PV310_PA_WATCHDOG 0x10060000
80#define S5PV310_PA_I2S0 0x03830000 48#define S5PV310_PA_RTC 0x10070000
81#define S5PV310_PA_I2S1 0xE3100000
82#define S5PV310_PA_I2S2 0xE2A00000
83 49
84/* PCM */ 50#define S5PV310_PA_DMC0 0x10400000
85#define S5PV310_PA_PCM0 0x03840000
86#define S5PV310_PA_PCM1 0x13980000
87#define S5PV310_PA_PCM2 0x13990000
88 51
89/* AC97 */ 52#define S5PV310_PA_COMBINER 0x10448000
90#define S5PV310_PA_AC97 0x139A0000
91 53
92#define S5PV310_PA_UART (0x13800000) 54#define S5PV310_PA_COREPERI 0x10500000
55#define S5PV310_PA_GIC_CPU 0x10500100
56#define S5PV310_PA_TWD 0x10500600
57#define S5PV310_PA_GIC_DIST 0x10501000
58#define S5PV310_PA_L2CC 0x10502000
93 59
94#define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET)) 60#define S5PV310_PA_MDMA 0x10810000
95#define S5P_PA_UART0 S5P_PA_UART(0) 61#define S5PV310_PA_PDMA0 0x12680000
96#define S5P_PA_UART1 S5P_PA_UART(1) 62#define S5PV310_PA_PDMA1 0x12690000
97#define S5P_PA_UART2 S5P_PA_UART(2)
98#define S5P_PA_UART3 S5P_PA_UART(3)
99#define S5P_PA_UART4 S5P_PA_UART(4)
100
101#define S5P_SZ_UART SZ_256
102
103#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
104
105#define S5PV310_PA_TIMER (0x139D0000)
106#define S5P_PA_TIMER S5PV310_PA_TIMER
107
108#define S5PV310_PA_SDRAM (0x40000000)
109#define S5P_PA_SDRAM S5PV310_PA_SDRAM
110 63
111#define S5PV310_PA_SYSMMU_MDMA 0x10A40000 64#define S5PV310_PA_SYSMMU_MDMA 0x10A40000
112#define S5PV310_PA_SYSMMU_SSS 0x10A50000 65#define S5PV310_PA_SYSMMU_SSS 0x10A50000
@@ -125,8 +78,31 @@
125#define S5PV310_PA_SYSMMU_MFC_L 0x13620000 78#define S5PV310_PA_SYSMMU_MFC_L 0x13620000
126#define S5PV310_PA_SYSMMU_MFC_R 0x13630000 79#define S5PV310_PA_SYSMMU_MFC_R 0x13630000
127 80
128/* compatibiltiy defines. */ 81#define S5PV310_PA_GPIO1 0x11400000
129#define S3C_PA_UART S5PV310_PA_UART 82#define S5PV310_PA_GPIO2 0x11000000
83#define S5PV310_PA_GPIO3 0x03860000
84
85#define S5PV310_PA_MIPI_CSIS0 0x11880000
86#define S5PV310_PA_MIPI_CSIS1 0x11890000
87
88#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
89
90#define S5PV310_PA_SROMC 0x12570000
91
92#define S5PV310_PA_UART 0x13800000
93
94#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
95
96#define S5PV310_PA_AC97 0x139A0000
97
98#define S5PV310_PA_TIMER 0x139D0000
99
100#define S5PV310_PA_SDRAM 0x40000000
101
102#define S5PV310_PA_SPDIF 0xE1100000
103
104/* Compatibiltiy Defines */
105
130#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) 106#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
131#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) 107#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
132#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) 108#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
@@ -141,7 +117,28 @@
141#define S3C_PA_IIC7 S5PV310_PA_IIC(7) 117#define S3C_PA_IIC7 S5PV310_PA_IIC(7)
142#define S3C_PA_RTC S5PV310_PA_RTC 118#define S3C_PA_RTC S5PV310_PA_RTC
143#define S3C_PA_WDT S5PV310_PA_WATCHDOG 119#define S3C_PA_WDT S5PV310_PA_WATCHDOG
120
121#define S5P_PA_CHIPID S5PV310_PA_CHIPID
144#define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 122#define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0
145#define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 123#define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1
124#define S5P_PA_ONENAND S5PC210_PA_ONENAND
125#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
126#define S5P_PA_SDRAM S5PV310_PA_SDRAM
127#define S5P_PA_SROMC S5PV310_PA_SROMC
128#define S5P_PA_SYSCON S5PV310_PA_SYSCON
129#define S5P_PA_TIMER S5PV310_PA_TIMER
130
131/* UART */
132
133#define S3C_PA_UART S5PV310_PA_UART
134
135#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
136#define S5P_PA_UART0 S5P_PA_UART(0)
137#define S5P_PA_UART1 S5P_PA_UART(1)
138#define S5P_PA_UART2 S5P_PA_UART(2)
139#define S5P_PA_UART3 S5P_PA_UART(3)
140#define S5P_PA_UART4 S5P_PA_UART(4)
141
142#define S5P_SZ_UART SZ_256
146 143
147#endif /* __ASM_ARCH_MAP_H */ 144#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h
index cacf17a958cd..53677e464d4b 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear320.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear320.h
@@ -62,7 +62,7 @@
62#define SPEAR320_SMII1_BASE 0xAB000000 62#define SPEAR320_SMII1_BASE 0xAB000000
63#define SPEAR320_SMII1_SIZE 0x01000000 63#define SPEAR320_SMII1_SIZE 0x01000000
64 64
65#define SPEAR320_SOC_CONFIG_BASE 0xB4000000 65#define SPEAR320_SOC_CONFIG_BASE 0xB3000000
66#define SPEAR320_SOC_CONFIG_SIZE 0x00000070 66#define SPEAR320_SOC_CONFIG_SIZE 0x00000070
67/* Interrupt registers offsets and masks */ 67/* Interrupt registers offsets and masks */
68#define INT_STS_MASK_REG 0x04 68#define INT_STS_MASK_REG 0x04
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
index 66ad2760c621..04c779832c78 100644
--- a/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -57,5 +57,6 @@ struct tegra_kbc_platform_data {
57 const struct matrix_keymap_data *keymap_data; 57 const struct matrix_keymap_data *keymap_data;
58 58
59 bool wakeup; 59 bool wakeup;
60 bool use_fn_map;
60}; 61};
61#endif 62#endif
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 170c9bb95866..f2ce38e085d2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask)
49static inline void cache_sync(void) 49static inline void cache_sync(void)
50{ 50{
51 void __iomem *base = l2x0_base; 51 void __iomem *base = l2x0_base;
52
53#ifdef CONFIG_ARM_ERRATA_753970
54 /* write to an unmmapped register */
55 writel_relaxed(0, base + L2X0_DUMMY_REG);
56#else
52 writel_relaxed(0, base + L2X0_CACHE_SYNC); 57 writel_relaxed(0, base + L2X0_CACHE_SYNC);
58#endif
53 cache_wait(base + L2X0_CACHE_SYNC, 1); 59 cache_wait(base + L2X0_CACHE_SYNC, 1);
54} 60}
55 61
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0c1172b56b4e..8e3356239136 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -264,6 +264,12 @@ __v7_setup:
264 orreq r10, r10, #1 << 6 @ set bit #6 264 orreq r10, r10, #1 << 6 @ set bit #6
265 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 265 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
266#endif 266#endif
267#ifdef CONFIG_ARM_ERRATA_751472
268 cmp r6, #0x30 @ present prior to r3p0
269 mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
270 orrlt r10, r10, #1 << 11 @ set bit #11
271 mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
272#endif
267 273
2683: mov r10, #0 2743: mov r10, #0
269#ifdef HARVARD_CACHE 275#ifdef HARVARD_CACHE
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 459b319a9fad..49d3208793e5 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
322 322
323struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) 323struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
324{ 324{
325 struct omap_mbox *mbox; 325 struct omap_mbox *_mbox, *mbox = NULL;
326 int ret; 326 int i, ret;
327 327
328 if (!mboxes) 328 if (!mboxes)
329 return ERR_PTR(-EINVAL); 329 return ERR_PTR(-EINVAL);
330 330
331 for (mbox = *mboxes; mbox; mbox++) 331 for (i = 0; (_mbox = mboxes[i]); i++) {
332 if (!strcmp(mbox->name, name)) 332 if (!strcmp(_mbox->name, name)) {
333 mbox = _mbox;
333 break; 334 break;
335 }
336 }
334 337
335 if (!mbox) 338 if (!mbox)
336 return ERR_PTR(-ENOENT); 339 return ERR_PTR(-ENOENT);
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c
index 6a7342886171..afaf87fdb93e 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-s5p/dev-uart.c
@@ -28,7 +28,7 @@
28static struct resource s5p_uart0_resource[] = { 28static struct resource s5p_uart0_resource[] = {
29 [0] = { 29 [0] = {
30 .start = S5P_PA_UART0, 30 .start = S5P_PA_UART0,
31 .end = S5P_PA_UART0 + S5P_SZ_UART, 31 .end = S5P_PA_UART0 + S5P_SZ_UART - 1,
32 .flags = IORESOURCE_MEM, 32 .flags = IORESOURCE_MEM,
33 }, 33 },
34 [1] = { 34 [1] = {
@@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = {
51static struct resource s5p_uart1_resource[] = { 51static struct resource s5p_uart1_resource[] = {
52 [0] = { 52 [0] = {
53 .start = S5P_PA_UART1, 53 .start = S5P_PA_UART1,
54 .end = S5P_PA_UART1 + S5P_SZ_UART, 54 .end = S5P_PA_UART1 + S5P_SZ_UART - 1,
55 .flags = IORESOURCE_MEM, 55 .flags = IORESOURCE_MEM,
56 }, 56 },
57 [1] = { 57 [1] = {
@@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = {
74static struct resource s5p_uart2_resource[] = { 74static struct resource s5p_uart2_resource[] = {
75 [0] = { 75 [0] = {
76 .start = S5P_PA_UART2, 76 .start = S5P_PA_UART2,
77 .end = S5P_PA_UART2 + S5P_SZ_UART, 77 .end = S5P_PA_UART2 + S5P_SZ_UART - 1,
78 .flags = IORESOURCE_MEM, 78 .flags = IORESOURCE_MEM,
79 }, 79 },
80 [1] = { 80 [1] = {
@@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = {
98#if CONFIG_SERIAL_SAMSUNG_UARTS > 3 98#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
99 [0] = { 99 [0] = {
100 .start = S5P_PA_UART3, 100 .start = S5P_PA_UART3,
101 .end = S5P_PA_UART3 + S5P_SZ_UART, 101 .end = S5P_PA_UART3 + S5P_SZ_UART - 1,
102 .flags = IORESOURCE_MEM, 102 .flags = IORESOURCE_MEM,
103 }, 103 },
104 [1] = { 104 [1] = {
@@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = {
123#if CONFIG_SERIAL_SAMSUNG_UARTS > 4 123#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
124 [0] = { 124 [0] = {
125 .start = S5P_PA_UART4, 125 .start = S5P_PA_UART4,
126 .end = S5P_PA_UART4 + S5P_SZ_UART, 126 .end = S5P_PA_UART4 + S5P_SZ_UART - 1,
127 .flags = IORESOURCE_MEM, 127 .flags = IORESOURCE_MEM,
128 }, 128 },
129 [1] = { 129 [1] = {
@@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = {
148#if CONFIG_SERIAL_SAMSUNG_UARTS > 5 148#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
149 [0] = { 149 [0] = {
150 .start = S5P_PA_UART5, 150 .start = S5P_PA_UART5,
151 .end = S5P_PA_UART5 + S5P_SZ_UART, 151 .end = S5P_PA_UART5 + S5P_SZ_UART - 1,
152 .flags = IORESOURCE_MEM, 152 .flags = IORESOURCE_MEM,
153 }, 153 },
154 [1] = { 154 [1] = {
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c
index 236ef8427d7d..3e4bd8147bf4 100644
--- a/arch/arm/plat-samsung/dev-ts.c
+++ b/arch/arm/plat-samsung/dev-ts.c
@@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
58 58
59 s3c_device_ts.dev.platform_data = npd; 59 s3c_device_ts.dev.platform_data = npd;
60} 60}
61EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
index 99ba6789cc97..6dd455bafdfd 100644
--- a/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -24,10 +24,10 @@ static inline void putc(int c)
24{ 24{
25 void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; 25 void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
26 26
27 while (readl(base + UART01x_FR) & UART01x_FR_TXFF) 27 while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF)
28 barrier(); 28 barrier();
29 29
30 writel(c, base + UART01x_DR); 30 writel_relaxed(c, base + UART01x_DR);
31} 31}
32 32
33static inline void flush(void) 33static inline void flush(void)
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
index 09e9372aea21..8c8b24d07046 100644
--- a/arch/arm/plat-spear/include/plat/vmalloc.h
+++ b/arch/arm/plat-spear/include/plat/vmalloc.h
@@ -14,6 +14,6 @@
14#ifndef __PLAT_VMALLOC_H 14#ifndef __PLAT_VMALLOC_H
15#define __PLAT_VMALLOC_H 15#define __PLAT_VMALLOC_H
16 16
17#define VMALLOC_END 0xF0000000 17#define VMALLOC_END 0xF0000000UL
18 18
19#endif /* __PLAT_VMALLOC_H */ 19#endif /* __PLAT_VMALLOC_H */
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 442218980db0..c49be845f96a 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -72,11 +72,6 @@ SECTIONS
72 INIT_TEXT_SECTION(PAGE_SIZE) 72 INIT_TEXT_SECTION(PAGE_SIZE)
73 .init.data : { INIT_DATA } 73 .init.data : { INIT_DATA }
74 .init.setup : { INIT_SETUP(16) } 74 .init.setup : { INIT_SETUP(16) }
75#ifdef CONFIG_ETRAX_ARCH_V32
76 __start___param = .;
77 __param : { *(__param) }
78 __stop___param = .;
79#endif
80 .initcall.init : { 75 .initcall.init : {
81 INIT_CALLS 76 INIT_CALLS
82 } 77 }
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be..fe56a23e1ff0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
240 * claims to support kexec. 240 * claims to support kexec.
241 */ 241 */
242 int (*machine_kexec_prepare)(struct kimage *image); 242 int (*machine_kexec_prepare)(struct kimage *image);
243
244 /* Called to perform the _real_ kexec.
245 * Do NOT allocate memory or fail here. We are past the point of
246 * no return.
247 */
248 void (*machine_kexec)(struct kimage *image);
243#endif /* CONFIG_KEXEC */ 249#endif /* CONFIG_KEXEC */
244 250
245#ifdef CONFIG_SUSPEND 251#ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af8145..a5f8672eeff3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
87 87
88 save_ftrace_enabled = __ftrace_enabled_save(); 88 save_ftrace_enabled = __ftrace_enabled_save();
89 89
90 default_machine_kexec(image); 90 if (ppc_md.machine_kexec)
91 ppc_md.machine_kexec(image);
92 else
93 default_machine_kexec(image);
91 94
92 __ftrace_enabled_restore(save_ftrace_enabled); 95 __ftrace_enabled_restore(save_ftrace_enabled);
93 96
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932..8303a6c65ef7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
353 prime_debug_regs(new_thread); 353 prime_debug_regs(new_thread);
354} 354}
355#else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 355#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
356#ifndef CONFIG_HAVE_HW_BREAKPOINT
356static void set_debug_reg_defaults(struct thread_struct *thread) 357static void set_debug_reg_defaults(struct thread_struct *thread)
357{ 358{
358 if (thread->dabr) { 359 if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
360 set_dabr(0); 361 set_dabr(0);
361 } 362 }
362} 363}
364#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
363#endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 365#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
364 366
365int set_dabr(unsigned long dabr) 367int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
670{ 672{
671 discard_lazy_cpu_state(); 673 discard_lazy_cpu_state();
672 674
673#ifdef CONFIG_HAVE_HW_BREAKPOINTS 675#ifdef CONFIG_HAVE_HW_BREAKPOINT
674 flush_ptrace_hw_breakpoint(current); 676 flush_ptrace_hw_breakpoint(current);
675#else /* CONFIG_HAVE_HW_BREAKPOINTS */ 677#else /* CONFIG_HAVE_HW_BREAKPOINT */
676 set_debug_reg_defaults(&current->thread); 678 set_debug_reg_defaults(&current->thread);
677#endif /* CONFIG_HAVE_HW_BREAKPOINTS */ 679#endif /* CONFIG_HAVE_HW_BREAKPOINT */
678} 680}
679 681
680void 682void
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
38 * neesd to be flushed. This function will either perform the flush 38 * neesd to be flushed. This function will either perform the flush
39 * immediately or will batch it up if the current CPU has an active 39 * immediately or will batch it up if the current CPU has an active
40 * batch on it. 40 * batch on it.
41 *
42 * Must be called from within some kind of spinlock/non-preempt region...
43 */ 41 */
44void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 42void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
45 pte_t *ptep, unsigned long pte, int huge) 43 pte_t *ptep, unsigned long pte, int huge)
46{ 44{
47 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
48 unsigned long vsid, vaddr; 46 unsigned long vsid, vaddr;
49 unsigned int psize; 47 unsigned int psize;
50 int ssize; 48 int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
99 */ 97 */
100 if (!batch->active) { 98 if (!batch->active) {
101 flush_hash_page(vaddr, rpte, psize, ssize, 0); 99 flush_hash_page(vaddr, rpte, psize, ssize, 0);
100 put_cpu_var(ppc64_tlb_batch);
102 return; 101 return;
103 } 102 }
104 103
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
127 batch->index = ++i; 126 batch->index = ++i;
128 if (i >= PPC64_TLB_BATCH_NR) 127 if (i >= PPC64_TLB_BATCH_NR)
129 __flush_tlb_pending(batch); 128 __flush_tlb_pending(batch);
129 put_cpu_var(ppc64_tlb_batch);
130} 130}
131 131
132/* 132/*
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 0851eb1e919e..2751b3a8a66f 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void)
133 unsigned long output_addr; 133 unsigned long output_addr;
134 unsigned char *output; 134 unsigned char *output;
135 135
136 check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); 136 output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
137 check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
137 memset(&_bss, 0, &_ebss - &_bss); 138 memset(&_bss, 0, &_ebss - &_bss);
138 free_mem_ptr = (unsigned long)&_end; 139 free_mem_ptr = (unsigned long)&_end;
139 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; 140 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
140 output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); 141 output = (unsigned char *) output_addr;
141 142
142#ifdef CONFIG_BLK_DEV_INITRD 143#ifdef CONFIG_BLK_DEV_INITRD
143 /* 144 /*
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea117181..5c5ba10384c2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,14 +36,19 @@
36 36
37static inline int atomic_read(const atomic_t *v) 37static inline int atomic_read(const atomic_t *v)
38{ 38{
39 barrier(); 39 int c;
40 return v->counter; 40
41 asm volatile(
42 " l %0,%1\n"
43 : "=d" (c) : "Q" (v->counter));
44 return c;
41} 45}
42 46
43static inline void atomic_set(atomic_t *v, int i) 47static inline void atomic_set(atomic_t *v, int i)
44{ 48{
45 v->counter = i; 49 asm volatile(
46 barrier(); 50 " st %1,%0\n"
51 : "=Q" (v->counter) : "d" (i));
47} 52}
48 53
49static inline int atomic_add_return(int i, atomic_t *v) 54static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
128 133
129static inline long long atomic64_read(const atomic64_t *v) 134static inline long long atomic64_read(const atomic64_t *v)
130{ 135{
131 barrier(); 136 long long c;
132 return v->counter; 137
138 asm volatile(
139 " lg %0,%1\n"
140 : "=d" (c) : "Q" (v->counter));
141 return c;
133} 142}
134 143
135static inline void atomic64_set(atomic64_t *v, long long i) 144static inline void atomic64_set(atomic64_t *v, long long i)
136{ 145{
137 v->counter = i; 146 asm volatile(
138 barrier(); 147 " stg %1,%0\n"
148 : "=Q" (v->counter) : "d" (i));
139} 149}
140 150
141static inline long long atomic64_add_return(long long i, atomic64_t *v) 151static inline long long atomic64_add_return(long long i, atomic64_t *v)
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa68b643..2a30d5ac0667 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@
13 13
14#define L1_CACHE_BYTES 256 14#define L1_CACHE_BYTES 256
15#define L1_CACHE_SHIFT 8 15#define L1_CACHE_SHIFT 8
16#define NET_SKB_PAD 32
16 17
17#define __read_mostly __attribute__((__section__(".data..read_mostly"))) 18#define __read_mostly __attribute__((__section__(".data..read_mostly")))
18 19
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 211ca3f7fd16..4ea15ca89b2b 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -88,6 +88,7 @@ extern int acpi_disabled;
88extern int acpi_pci_disabled; 88extern int acpi_pci_disabled;
89extern int acpi_skip_timer_override; 89extern int acpi_skip_timer_override;
90extern int acpi_use_timer_override; 90extern int acpi_use_timer_override;
91extern int acpi_fix_pin2_polarity;
91 92
92extern u8 acpi_sci_flags; 93extern u8 acpi_sci_flags;
93extern int acpi_sci_override_gsi; 94extern int acpi_sci_override_gsi;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0d998e..43a18c77676d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,11 @@
36#define MSR_IA32_PERFCTR1 0x000000c2 36#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd 37#define MSR_FSB_FREQ 0x000000cd
38 38
39#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
40#define NHM_C3_AUTO_DEMOTE (1UL << 25)
41#define NHM_C1_AUTO_DEMOTE (1UL << 26)
42#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
43
39#define MSR_MTRRcap 0x000000fe 44#define MSR_MTRRcap 0x000000fe
40#define MSR_IA32_BBL_CR_CTL 0x00000119 45#define MSR_IA32_BBL_CR_CTL 0x00000119
41 46
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index e2f6a99f14ab..cc29086e30cd 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -22,6 +22,7 @@
22 22
23#define ARCH_P4_CNTRVAL_BITS (40) 23#define ARCH_P4_CNTRVAL_BITS (40)
24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) 24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
25#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
25 26
26#define P4_ESCR_EVENT_MASK 0x7e000000U 27#define P4_ESCR_EVENT_MASK 0x7e000000U
27#define P4_ESCR_EVENT_SHIFT 25 28#define P4_ESCR_EVENT_SHIFT 25
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 6c22bf353f26..725b77831993 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
34 */ 34 */
35 CMOS_WRITE(0, 0xf); 35 CMOS_WRITE(0, 0xf);
36 36
37 *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; 37 *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
38} 38}
39 39
40static inline void __init smpboot_setup_io_apic(void) 40static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b3a71137983a..3e6e2d68f761 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
72int acpi_sci_override_gsi __initdata; 72int acpi_sci_override_gsi __initdata;
73int acpi_skip_timer_override __initdata; 73int acpi_skip_timer_override __initdata;
74int acpi_use_timer_override __initdata; 74int acpi_use_timer_override __initdata;
75int acpi_fix_pin2_polarity __initdata;
75 76
76#ifdef CONFIG_X86_LOCAL_APIC 77#ifdef CONFIG_X86_LOCAL_APIC
77static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; 78static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
415 return 0; 416 return 0;
416 } 417 }
417 418
418 if (acpi_skip_timer_override && 419 if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
419 intsrc->source_irq == 0 && intsrc->global_irq == 2) { 420 if (acpi_skip_timer_override) {
420 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); 421 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
421 return 0; 422 return 0;
423 }
424 if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
425 intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
426 printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
427 }
422 } 428 }
423 429
424 mp_override_legacy_irq(intsrc->source_irq, 430 mp_override_legacy_irq(intsrc->source_irq,
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51ef31a89be9..51d4e1663066 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
284 memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); 284 memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
285 285
286 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { 286 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
287 apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100; 287 adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
288 global_clock_event = &adev->evt; 288 global_clock_event = &adev->evt;
289 printk(KERN_DEBUG "%s clockevent registered as global\n", 289 printk(KERN_DEBUG "%s clockevent registered as global\n",
290 global_clock_event->name); 290 global_clock_event->name);
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac747f67..52c93648e492 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
158{ 158{
159 if (c->x86 == 0x06) { 159 if (c->x86 == 0x06) {
160 if (cpu_has(c, X86_FEATURE_EST)) 160 if (cpu_has(c, X86_FEATURE_EST))
161 printk(KERN_WARNING PFX "Warning: EST-capable CPU " 161 printk_once(KERN_WARNING PFX "Warning: EST-capable "
162 "detected. The acpi-cpufreq module offers " 162 "CPU detected. The acpi-cpufreq module offers "
163 "voltage scaling in addition of frequency " 163 "voltage scaling in addition to frequency "
164 "scaling. You should use that instead of " 164 "scaling. You should use that instead of "
165 "p4-clockmod, if possible.\n"); 165 "p4-clockmod, if possible.\n");
166 switch (c->x86_model) { 166 switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65e59be..c567dec854f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
1537static int __cpuinit powernowk8_init(void) 1537static int __cpuinit powernowk8_init(void)
1538{ 1538{
1539 unsigned int i, supported_cpus = 0, cpu; 1539 unsigned int i, supported_cpus = 0, cpu;
1540 int rv;
1540 1541
1541 for_each_online_cpu(i) { 1542 for_each_online_cpu(i) {
1542 int rc; 1543 int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
1555 1556
1556 cpb_capable = true; 1557 cpb_capable = true;
1557 1558
1558 register_cpu_notifier(&cpb_nb);
1559
1560 msrs = msrs_alloc(); 1559 msrs = msrs_alloc();
1561 if (!msrs) { 1560 if (!msrs) {
1562 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); 1561 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
1563 return -ENOMEM; 1562 return -ENOMEM;
1564 } 1563 }
1565 1564
1565 register_cpu_notifier(&cpb_nb);
1566
1566 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1567 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1567 1568
1568 for_each_cpu(cpu, cpu_online_mask) { 1569 for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
1574 (cpb_enabled ? "on" : "off")); 1575 (cpb_enabled ? "on" : "off"));
1575 } 1576 }
1576 1577
1577 return cpufreq_register_driver(&cpufreq_amd64_driver); 1578 rv = cpufreq_register_driver(&cpufreq_amd64_driver);
1579 if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
1580 unregister_cpu_notifier(&cpb_nb);
1581 msrs_free(msrs);
1582 msrs = NULL;
1583 }
1584 return rv;
1578} 1585}
1579 1586
1580/* driver entry point for term */ 1587/* driver entry point for term */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index f7a0993c1e7c..ff751a9f182b 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
770 return 1; 770 return 1;
771 } 771 }
772 772
773 /* it might be unflagged overflow */ 773 /*
774 rdmsrl(hwc->event_base + hwc->idx, v); 774 * In some circumstances the overflow might issue an NMI but did
775 if (!(v & ARCH_P4_CNTRVAL_MASK)) 775 * not set P4_CCCR_OVF bit. Because a counter holds a negative value
776 * we simply check for high bit being set, if it's cleared it means
777 * the counter has reached zero value and continued counting before
778 * real NMI signal was received:
779 */
780 if (!(v & ARCH_P4_UNFLAGGED_BIT))
776 return 1; 781 return 1;
777 782
778 return 0; 783 return 0;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 76b8cd953dee..9efbdcc56425 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
143 143
144static u32 __init ati_sbx00_rev(int num, int slot, int func) 144static u32 __init ati_sbx00_rev(int num, int slot, int func)
145{ 145{
146 u32 old, d; 146 u32 d;
147 147
148 d = read_pci_config(num, slot, func, 0x70);
149 old = d;
150 d &= ~(1<<8);
151 write_pci_config(num, slot, func, 0x70, d);
152 d = read_pci_config(num, slot, func, 0x8); 148 d = read_pci_config(num, slot, func, 0x8);
153 d &= 0xff; 149 d &= 0xff;
154 write_pci_config(num, slot, func, 0x70, old);
155 150
156 return d; 151 return d;
157} 152}
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
160{ 155{
161 u32 d, rev; 156 u32 d, rev;
162 157
163 if (acpi_use_timer_override)
164 return;
165
166 rev = ati_sbx00_rev(num, slot, func); 158 rev = ati_sbx00_rev(num, slot, func);
159 if (rev >= 0x40)
160 acpi_fix_pin2_polarity = 1;
161
167 if (rev > 0x13) 162 if (rev > 0x13)
168 return; 163 return;
169 164
165 if (acpi_use_timer_override)
166 return;
167
170 /* check for IRQ0 interrupt swap */ 168 /* check for IRQ0 interrupt swap */
171 d = read_pci_config(num, slot, func, 0x64); 169 d = read_pci_config(num, slot, func, 0x64);
172 if (!(d & (1<<14))) 170 if (!(d & (1<<14)))
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fc7aae1e2bc7..715037caeb43 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
285 DMI_MATCH(DMI_BOARD_NAME, "P4S800"), 285 DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
286 }, 286 },
287 }, 287 },
288 { /* Handle problems with rebooting on VersaLogic Menlow boards */
289 .callback = set_bios_reboot,
290 .ident = "VersaLogic Menlow based board",
291 .matches = {
292 DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
293 DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
294 },
295 },
288 { } 296 { }
289}; 297};
290 298
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 54ce246a383e..63fec1531e89 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
2777 kvm_register_write(&svm->vcpu, reg, val); 2777 kvm_register_write(&svm->vcpu, reg, val);
2778 } 2778 }
2779 2779
2780 skip_emulated_instruction(&svm->vcpu);
2781
2780 return 1; 2782 return 1;
2781} 2783}
2782 2784
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index dab874647530..044bda5b3174 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
140 * wasted bootmem) and hand off chunks of it to callers. 140 * wasted bootmem) and hand off chunks of it to callers.
141 */ 141 */
142 res = alloc_bootmem(chunk_size); 142 res = alloc_bootmem(chunk_size);
143 if (!res) 143 BUG_ON(!res);
144 return NULL;
145 prom_early_allocated += chunk_size; 144 prom_early_allocated += chunk_size;
146 memset(res, 0, chunk_size); 145 memset(res, 0, chunk_size);
147 free_mem = chunk_size; 146 free_mem = chunk_size;
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f4002f79a24..518dd423a5fe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
352 WARN_ON(!irqs_disabled()); 352 WARN_ON(!irqs_disabled());
353 353
354 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 354 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
355 __blk_run_queue(q); 355 __blk_run_queue(q, false);
356} 356}
357EXPORT_SYMBOL(blk_start_queue); 357EXPORT_SYMBOL(blk_start_queue);
358 358
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
403/** 403/**
404 * __blk_run_queue - run a single device queue 404 * __blk_run_queue - run a single device queue
405 * @q: The queue to run 405 * @q: The queue to run
406 * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
406 * 407 *
407 * Description: 408 * Description:
408 * See @blk_run_queue. This variant must be called with the queue lock 409 * See @blk_run_queue. This variant must be called with the queue lock
409 * held and interrupts disabled. 410 * held and interrupts disabled.
410 * 411 *
411 */ 412 */
412void __blk_run_queue(struct request_queue *q) 413void __blk_run_queue(struct request_queue *q, bool force_kblockd)
413{ 414{
414 blk_remove_plug(q); 415 blk_remove_plug(q);
415 416
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
423 * Only recurse once to avoid overrunning the stack, let the unplug 424 * Only recurse once to avoid overrunning the stack, let the unplug
424 * handling reinvoke the handler shortly if we already got there. 425 * handling reinvoke the handler shortly if we already got there.
425 */ 426 */
426 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 427 if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
427 q->request_fn(q); 428 q->request_fn(q);
428 queue_flag_clear(QUEUE_FLAG_REENTER, q); 429 queue_flag_clear(QUEUE_FLAG_REENTER, q);
429 } else { 430 } else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
446 unsigned long flags; 447 unsigned long flags;
447 448
448 spin_lock_irqsave(q->queue_lock, flags); 449 spin_lock_irqsave(q->queue_lock, flags);
449 __blk_run_queue(q); 450 __blk_run_queue(q, false);
450 spin_unlock_irqrestore(q->queue_lock, flags); 451 spin_unlock_irqrestore(q->queue_lock, flags);
451} 452}
452EXPORT_SYMBOL(blk_run_queue); 453EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
1053 1054
1054 drive_stat_acct(rq, 1); 1055 drive_stat_acct(rq, 1);
1055 __elv_add_request(q, rq, where, 0); 1056 __elv_add_request(q, rq, where, 0);
1056 __blk_run_queue(q); 1057 __blk_run_queue(q, false);
1057 spin_unlock_irqrestore(q->queue_lock, flags); 1058 spin_unlock_irqrestore(q->queue_lock, flags);
1058} 1059}
1059EXPORT_SYMBOL(blk_insert_request); 1060EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2610} 2611}
2611EXPORT_SYMBOL(kblockd_schedule_work); 2612EXPORT_SYMBOL(kblockd_schedule_work);
2612 2613
2613int kblockd_schedule_delayed_work(struct request_queue *q,
2614 struct delayed_work *dwork, unsigned long delay)
2615{
2616 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2617}
2618EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2619
2620int __init blk_dev_init(void) 2614int __init blk_dev_init(void)
2621{ 2615{
2622 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2616 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d6563e..b27d0208611b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
66 66
67 /* 67 /*
68 * Moving a request silently to empty queue_head may stall the 68 * Moving a request silently to empty queue_head may stall the
69 * queue. Kick the queue in those cases. 69 * queue. Kick the queue in those cases. This function is called
70 * from request completion path and calling directly into
71 * request_fn may confuse the driver. Always use kblockd.
70 */ 72 */
71 if (was_empty && next_rq) 73 if (was_empty && next_rq)
72 __blk_run_queue(q); 74 __blk_run_queue(q, true);
73} 75}
74 76
75static void pre_flush_end_io(struct request *rq, int error) 77static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
130 BUG(); 132 BUG();
131 } 133 }
132 134
133 elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 135 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
134 return rq; 136 return rq;
135} 137}
136 138
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2406b0..eec78becb355 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
132} 132}
133 133
134/** 134/**
135 * blkdev_issue_zeroout generate number of zero filed write bios 135 * blkdev_issue_zeroout - generate number of zero filed write bios
136 * @bdev: blockdev to issue 136 * @bdev: blockdev to issue
137 * @sector: start sector 137 * @sector: start sector
138 * @nr_sects: number of sectors to write 138 * @nr_sects: number of sectors to write
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a89043a3caa4..e36cc10a346c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
20/* Throttling is performed over 100ms slice and after that slice is renewed */ 20/* Throttling is performed over 100ms slice and after that slice is renewed */
21static unsigned long throtl_slice = HZ/10; /* 100 ms */ 21static unsigned long throtl_slice = HZ/10; /* 100 ms */
22 22
23/* A workqueue to queue throttle related work */
24static struct workqueue_struct *kthrotld_workqueue;
25static void throtl_schedule_delayed_work(struct throtl_data *td,
26 unsigned long delay);
27
23struct throtl_rb_root { 28struct throtl_rb_root {
24 struct rb_root rb; 29 struct rb_root rb;
25 struct rb_node *left; 30 struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
345 update_min_dispatch_time(st); 350 update_min_dispatch_time(st);
346 351
347 if (time_before_eq(st->min_disptime, jiffies)) 352 if (time_before_eq(st->min_disptime, jiffies))
348 throtl_schedule_delayed_work(td->queue, 0); 353 throtl_schedule_delayed_work(td, 0);
349 else 354 else
350 throtl_schedule_delayed_work(td->queue, 355 throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
351 (st->min_disptime - jiffies));
352} 356}
353 357
354static inline void 358static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
815} 819}
816 820
817/* Call with queue lock held */ 821/* Call with queue lock held */
818void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) 822static void
823throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
819{ 824{
820 825
821 struct throtl_data *td = q->td;
822 struct delayed_work *dwork = &td->throtl_work; 826 struct delayed_work *dwork = &td->throtl_work;
823 827
824 if (total_nr_queued(td) > 0) { 828 if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
827 * Cancel that and schedule a new one. 831 * Cancel that and schedule a new one.
828 */ 832 */
829 __cancel_delayed_work(dwork); 833 __cancel_delayed_work(dwork);
830 kblockd_schedule_delayed_work(q, dwork, delay); 834 queue_delayed_work(kthrotld_workqueue, dwork, delay);
831 throtl_log(td, "schedule work. delay=%lu jiffies=%lu", 835 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
832 delay, jiffies); 836 delay, jiffies);
833 } 837 }
834} 838}
835EXPORT_SYMBOL(throtl_schedule_delayed_work);
836 839
837static void 840static void
838throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) 841throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
920 smp_mb__after_atomic_inc(); 923 smp_mb__after_atomic_inc();
921 924
922 /* Schedule a work now to process the limit change */ 925 /* Schedule a work now to process the limit change */
923 throtl_schedule_delayed_work(td->queue, 0); 926 throtl_schedule_delayed_work(td, 0);
924} 927}
925 928
926static void throtl_update_blkio_group_write_bps(void *key, 929static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
934 smp_mb__before_atomic_inc(); 937 smp_mb__before_atomic_inc();
935 atomic_inc(&td->limits_changed); 938 atomic_inc(&td->limits_changed);
936 smp_mb__after_atomic_inc(); 939 smp_mb__after_atomic_inc();
937 throtl_schedule_delayed_work(td->queue, 0); 940 throtl_schedule_delayed_work(td, 0);
938} 941}
939 942
940static void throtl_update_blkio_group_read_iops(void *key, 943static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
948 smp_mb__before_atomic_inc(); 951 smp_mb__before_atomic_inc();
949 atomic_inc(&td->limits_changed); 952 atomic_inc(&td->limits_changed);
950 smp_mb__after_atomic_inc(); 953 smp_mb__after_atomic_inc();
951 throtl_schedule_delayed_work(td->queue, 0); 954 throtl_schedule_delayed_work(td, 0);
952} 955}
953 956
954static void throtl_update_blkio_group_write_iops(void *key, 957static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
962 smp_mb__before_atomic_inc(); 965 smp_mb__before_atomic_inc();
963 atomic_inc(&td->limits_changed); 966 atomic_inc(&td->limits_changed);
964 smp_mb__after_atomic_inc(); 967 smp_mb__after_atomic_inc();
965 throtl_schedule_delayed_work(td->queue, 0); 968 throtl_schedule_delayed_work(td, 0);
966} 969}
967 970
968void throtl_shutdown_timer_wq(struct request_queue *q) 971void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
1135 1138
1136static int __init throtl_init(void) 1139static int __init throtl_init(void)
1137{ 1140{
1141 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1142 if (!kthrotld_workqueue)
1143 panic("Failed to create kthrotld\n");
1144
1138 blkio_policy_register(&blkio_policy_throtl); 1145 blkio_policy_register(&blkio_policy_throtl);
1139 return 0; 1146 return 0;
1140} 1147}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7be4c7959625..ea83a4f0c27d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3355 cfqd->busy_queues > 1) { 3355 cfqd->busy_queues > 1) {
3356 cfq_del_timer(cfqd, cfqq); 3356 cfq_del_timer(cfqd, cfqq);
3357 cfq_clear_cfqq_wait_request(cfqq); 3357 cfq_clear_cfqq_wait_request(cfqq);
3358 __blk_run_queue(cfqd->queue); 3358 __blk_run_queue(cfqd->queue, false);
3359 } else { 3359 } else {
3360 cfq_blkiocg_update_idle_time_stats( 3360 cfq_blkiocg_update_idle_time_stats(
3361 &cfqq->cfqg->blkg); 3361 &cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3370 * this new queue is RT and the current one is BE 3370 * this new queue is RT and the current one is BE
3371 */ 3371 */
3372 cfq_preempt_queue(cfqd, cfqq); 3372 cfq_preempt_queue(cfqd, cfqq);
3373 __blk_run_queue(cfqd->queue); 3373 __blk_run_queue(cfqd->queue, false);
3374 } 3374 }
3375} 3375}
3376 3376
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
3731 struct request_queue *q = cfqd->queue; 3731 struct request_queue *q = cfqd->queue;
3732 3732
3733 spin_lock_irq(q->queue_lock); 3733 spin_lock_irq(q->queue_lock);
3734 __blk_run_queue(cfqd->queue); 3734 __blk_run_queue(cfqd->queue, false);
3735 spin_unlock_irq(q->queue_lock); 3735 spin_unlock_irq(q->queue_lock);
3736} 3736}
3737 3737
diff --git a/block/elevator.c b/block/elevator.c
index 2569512830d3..236e93c1f46c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
602 */ 602 */
603 elv_drain_elevator(q); 603 elv_drain_elevator(q);
604 while (q->rq.elvpriv) { 604 while (q->rq.elvpriv) {
605 __blk_run_queue(q); 605 __blk_run_queue(q, false);
606 spin_unlock_irq(q->queue_lock); 606 spin_unlock_irq(q->queue_lock);
607 msleep(10); 607 msleep(10);
608 spin_lock_irq(q->queue_lock); 608 spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
651 * with anything. There's no point in delaying queue 651 * with anything. There's no point in delaying queue
652 * processing. 652 * processing.
653 */ 653 */
654 __blk_run_queue(q); 654 __blk_run_queue(q, false);
655 break; 655 break;
656 656
657 case ELEVATOR_INSERT_SORT: 657 case ELEVATOR_INSERT_SORT:
diff --git a/block/genhd.c b/block/genhd.c
index 6a5b772aa201..cbf1112a885c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
1355 struct block_device *bdev = bdget_disk(disk, partno); 1355 struct block_device *bdev = bdget_disk(disk, partno);
1356 if (bdev) { 1356 if (bdev) {
1357 fsync_bdev(bdev); 1357 fsync_bdev(bdev);
1358 res = __invalidate_device(bdev); 1358 res = __invalidate_device(bdev, true);
1359 bdput(bdev); 1359 bdput(bdev);
1360 } 1360 }
1361 return res; 1361 return res;
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa89..1124cd297263 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
294 return -EINVAL; 294 return -EINVAL;
295 if (get_user(n, (int __user *) arg)) 295 if (get_user(n, (int __user *) arg))
296 return -EFAULT; 296 return -EFAULT;
297 if (!(mode & FMODE_EXCL) && 297 if (!(mode & FMODE_EXCL)) {
298 blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) 298 bdgrab(bdev);
299 return -EBUSY; 299 if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
300 return -EBUSY;
301 }
300 ret = set_blocksize(bdev, n); 302 ret = set_blocksize(bdev, n);
301 if (!(mode & FMODE_EXCL)) 303 if (!(mode & FMODE_EXCL))
302 blkdev_put(bdev, mode | FMODE_EXCL); 304 blkdev_put(bdev, mode | FMODE_EXCL);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 54784bb42cec..edc25867ad9d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
416 u8 originally_enabled; /* True if GPE was originally enabled */ 416 u8 originally_enabled; /* True if GPE was originally enabled */
417}; 417};
418 418
419struct acpi_gpe_notify_object {
420 struct acpi_namespace_node *node;
421 struct acpi_gpe_notify_object *next;
422};
423
419union acpi_gpe_dispatch_info { 424union acpi_gpe_dispatch_info {
420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 425 struct acpi_namespace_node *method_node; /* Method node for this GPE level */
421 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ 426 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
422 struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ 427 struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
423}; 428};
424 429
425/* 430/*
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 14988a86066f..f4725212eb48 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
457 acpi_status status; 457 acpi_status status;
458 struct acpi_gpe_event_info *local_gpe_event_info; 458 struct acpi_gpe_event_info *local_gpe_event_info;
459 struct acpi_evaluate_info *info; 459 struct acpi_evaluate_info *info;
460 struct acpi_gpe_notify_object *notify_object;
460 461
461 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 462 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
462 463
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
508 * from this thread -- because handlers may in turn run other 509 * from this thread -- because handlers may in turn run other
509 * control methods. 510 * control methods.
510 */ 511 */
511 status = 512 status = acpi_ev_queue_notify_request(
512 acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. 513 local_gpe_event_info->dispatch.device.node,
513 device_node, 514 ACPI_NOTIFY_DEVICE_WAKE);
514 ACPI_NOTIFY_DEVICE_WAKE); 515
516 notify_object = local_gpe_event_info->dispatch.device.next;
517 while (ACPI_SUCCESS(status) && notify_object) {
518 status = acpi_ev_queue_notify_request(
519 notify_object->node,
520 ACPI_NOTIFY_DEVICE_WAKE);
521 notify_object = notify_object->next;
522 }
523
515 break; 524 break;
516 525
517 case ACPI_GPE_DISPATCH_METHOD: 526 case ACPI_GPE_DISPATCH_METHOD:
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3b20a3401b64..52aaff3df562 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
198 acpi_status status = AE_BAD_PARAMETER; 198 acpi_status status = AE_BAD_PARAMETER;
199 struct acpi_gpe_event_info *gpe_event_info; 199 struct acpi_gpe_event_info *gpe_event_info;
200 struct acpi_namespace_node *device_node; 200 struct acpi_namespace_node *device_node;
201 struct acpi_gpe_notify_object *notify_object;
201 acpi_cpu_flags flags; 202 acpi_cpu_flags flags;
203 u8 gpe_dispatch_mask;
202 204
203 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); 205 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
204 206
@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
221 goto unlock_and_exit; 223 goto unlock_and_exit;
222 } 224 }
223 225
226 if (wake_device == ACPI_ROOT_OBJECT) {
227 goto out;
228 }
229
224 /* 230 /*
225 * If there is no method or handler for this GPE, then the 231 * If there is no method or handler for this GPE, then the
226 * wake_device will be notified whenever this GPE fires (aka 232 * wake_device will be notified whenever this GPE fires (aka
227 * "implicit notify") Note: The GPE is assumed to be 233 * "implicit notify") Note: The GPE is assumed to be
228 * level-triggered (for windows compatibility). 234 * level-triggered (for windows compatibility).
229 */ 235 */
230 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 236 gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
231 ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { 237 if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
238 && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
239 goto out;
240 }
232 241
233 /* Validate wake_device is of type Device */ 242 /* Validate wake_device is of type Device */
234 243
235 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, 244 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
236 wake_device); 245 if (device_node->type != ACPI_TYPE_DEVICE) {
237 if (device_node->type != ACPI_TYPE_DEVICE) { 246 goto unlock_and_exit;
238 goto unlock_and_exit; 247 }
239 } 248
249 if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
240 gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | 250 gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
241 ACPI_GPE_LEVEL_TRIGGERED); 251 ACPI_GPE_LEVEL_TRIGGERED);
242 gpe_event_info->dispatch.device_node = device_node; 252 gpe_event_info->dispatch.device.node = device_node;
253 gpe_event_info->dispatch.device.next = NULL;
254 } else {
255 /* There are multiple devices to notify implicitly. */
256
257 notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
258 if (!notify_object) {
259 status = AE_NO_MEMORY;
260 goto unlock_and_exit;
261 }
262
263 notify_object->node = device_node;
264 notify_object->next = gpe_event_info->dispatch.device.next;
265 gpe_event_info->dispatch.device.next = notify_object;
243 } 266 }
244 267
268 out:
245 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 269 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
246 status = AE_OK; 270 status = AE_OK;
247 271
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1d6c61..384f7abcff77 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
26 size_t count, loff_t *ppos) 26 size_t count, loff_t *ppos)
27{ 27{
28 static char *buf; 28 static char *buf;
29 static int uncopied_bytes; 29 static u32 max_size;
30 static u32 uncopied_bytes;
31
30 struct acpi_table_header table; 32 struct acpi_table_header table;
31 acpi_status status; 33 acpi_status status;
32 34
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
37 if (copy_from_user(&table, user_buf, 39 if (copy_from_user(&table, user_buf,
38 sizeof(struct acpi_table_header))) 40 sizeof(struct acpi_table_header)))
39 return -EFAULT; 41 return -EFAULT;
40 uncopied_bytes = table.length; 42 uncopied_bytes = max_size = table.length;
41 buf = kzalloc(uncopied_bytes, GFP_KERNEL); 43 buf = kzalloc(max_size, GFP_KERNEL);
42 if (!buf) 44 if (!buf)
43 return -ENOMEM; 45 return -ENOMEM;
44 } 46 }
45 47
46 if (uncopied_bytes < count) { 48 if (buf == NULL)
47 kfree(buf); 49 return -EINVAL;
50
51 if ((*ppos > max_size) ||
52 (*ppos + count > max_size) ||
53 (*ppos + count < count) ||
54 (count > uncopied_bytes))
48 return -EINVAL; 55 return -EINVAL;
49 }
50 56
51 if (copy_from_user(buf + (*ppos), user_buf, count)) { 57 if (copy_from_user(buf + (*ppos), user_buf, count)) {
52 kfree(buf); 58 kfree(buf);
59 buf = NULL;
53 return -EFAULT; 60 return -EFAULT;
54 } 61 }
55 62
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
59 if (!uncopied_bytes) { 66 if (!uncopied_bytes) {
60 status = acpi_install_method(buf); 67 status = acpi_install_method(buf);
61 kfree(buf); 68 kfree(buf);
69 buf = NULL;
62 if (ACPI_FAILURE(status)) 70 if (ACPI_FAILURE(status))
63 return -EINVAL; 71 return -EINVAL;
64 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); 72 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 8cbfaa687d72..fe81c851ca88 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2177,7 +2177,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
2177 return; 2177 return;
2178 } 2178 }
2179 2179
2180 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { 2180 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
2181 retcode = ERR_PERM; 2181 retcode = ERR_PERM;
2182 goto fail; 2182 goto fail;
2183 } 2183 }
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3281 struct block_device *bdev = opened_bdev[cnt]; 3281 struct block_device *bdev = opened_bdev[cnt];
3282 if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) 3282 if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
3283 continue; 3283 continue;
3284 __invalidate_device(bdev); 3284 __invalidate_device(bdev, true);
3285 } 3285 }
3286 mutex_unlock(&open_lock); 3286 mutex_unlock(&open_lock);
3287 } else { 3287 } else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 49e6a545eb63..dbf31ec9114d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,6 @@
78 78
79#include <asm/uaccess.h> 79#include <asm/uaccess.h>
80 80
81static DEFINE_MUTEX(loop_mutex);
82static LIST_HEAD(loop_devices); 81static LIST_HEAD(loop_devices);
83static DEFINE_MUTEX(loop_devices_mutex); 82static DEFINE_MUTEX(loop_devices_mutex);
84 83
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
1501{ 1500{
1502 struct loop_device *lo = bdev->bd_disk->private_data; 1501 struct loop_device *lo = bdev->bd_disk->private_data;
1503 1502
1504 mutex_lock(&loop_mutex);
1505 mutex_lock(&lo->lo_ctl_mutex); 1503 mutex_lock(&lo->lo_ctl_mutex);
1506 lo->lo_refcnt++; 1504 lo->lo_refcnt++;
1507 mutex_unlock(&lo->lo_ctl_mutex); 1505 mutex_unlock(&lo->lo_ctl_mutex);
1508 mutex_unlock(&loop_mutex);
1509 1506
1510 return 0; 1507 return 0;
1511} 1508}
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
1515 struct loop_device *lo = disk->private_data; 1512 struct loop_device *lo = disk->private_data;
1516 int err; 1513 int err;
1517 1514
1518 mutex_lock(&loop_mutex);
1519 mutex_lock(&lo->lo_ctl_mutex); 1515 mutex_lock(&lo->lo_ctl_mutex);
1520 1516
1521 if (--lo->lo_refcnt) 1517 if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
1540out: 1536out:
1541 mutex_unlock(&lo->lo_ctl_mutex); 1537 mutex_unlock(&lo->lo_ctl_mutex);
1542out_unlocked: 1538out_unlocked:
1543 mutex_unlock(&loop_mutex);
1544 return 0; 1539 return 0;
1545} 1540}
1546 1541
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 411ae9c9b384..7e0ebd4a1a74 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1043,8 +1043,6 @@ static int btusb_probe(struct usb_interface *intf,
1043 1043
1044 usb_set_intfdata(intf, data); 1044 usb_set_intfdata(intf, data);
1045 1045
1046 usb_enable_autosuspend(interface_to_usbdev(intf));
1047
1048 return 0; 1046 return 0;
1049} 1047}
1050 1048
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85706ef..780498d76581 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
773#else 773#else
774 printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); 774 printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
775#endif 775#endif
776 pci_unregister_driver(&agp_amd64_pci_driver);
776 return -ENODEV; 777 return -ENODEV;
777 } 778 }
778 779
779 /* First check that we have at least one AMD64 NB */ 780 /* First check that we have at least one AMD64 NB */
780 if (!pci_dev_present(amd_nb_misc_ids)) 781 if (!pci_dev_present(amd_nb_misc_ids)) {
782 pci_unregister_driver(&agp_amd64_pci_driver);
781 return -ENODEV; 783 return -ENODEV;
784 }
782 785
783 /* Look for any AGP bridge */ 786 /* Look for any AGP bridge */
784 agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; 787 agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
785 err = driver_attach(&agp_amd64_pci_driver.driver); 788 err = driver_attach(&agp_amd64_pci_driver.driver);
786 if (err == 0 && agp_bridges_found == 0) 789 if (err == 0 && agp_bridges_found == 0) {
790 pci_unregister_driver(&agp_amd64_pci_driver);
787 err = -ENODEV; 791 err = -ENODEV;
792 }
788 } 793 }
789 return err; 794 return err;
790} 795}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfeade11..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
130#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 130#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
131 131
132#define I915_IFPADDR 0x60 132#define I915_IFPADDR 0x60
133#define I830_HIC 0x70
133 134
134/* Intel 965G registers */ 135/* Intel 965G registers */
135#define I965_MSAC 0x62 136#define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d3265adb..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <linux/agp_backend.h> 23#include <linux/agp_backend.h>
24#include <linux/delay.h>
24#include <asm/smp.h> 25#include <asm/smp.h>
25#include "agp.h" 26#include "agp.h"
26#include "intel-agp.h" 27#include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
70 u32 __iomem *gtt; /* I915G */ 71 u32 __iomem *gtt; /* I915G */
71 bool clear_fake_agp; /* on first access via agp, fill with scratch */ 72 bool clear_fake_agp; /* on first access via agp, fill with scratch */
72 int num_dcache_entries; 73 int num_dcache_entries;
73 union { 74 void __iomem *i9xx_flush_page;
74 void __iomem *i9xx_flush_page;
75 void *i8xx_flush_page;
76 };
77 char *i81x_gtt_table; 75 char *i81x_gtt_table;
78 struct page *i8xx_page;
79 struct resource ifp_resource; 76 struct resource ifp_resource;
80 int resource_valid; 77 int resource_valid;
81 struct page *scratch_page; 78 struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
722 719
723static void i830_cleanup(void) 720static void i830_cleanup(void)
724{ 721{
725 if (intel_private.i8xx_flush_page) {
726 kunmap(intel_private.i8xx_flush_page);
727 intel_private.i8xx_flush_page = NULL;
728 }
729
730 __free_page(intel_private.i8xx_page);
731 intel_private.i8xx_page = NULL;
732}
733
734static void intel_i830_setup_flush(void)
735{
736 /* return if we've already set the flush mechanism up */
737 if (intel_private.i8xx_page)
738 return;
739
740 intel_private.i8xx_page = alloc_page(GFP_KERNEL);
741 if (!intel_private.i8xx_page)
742 return;
743
744 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
745 if (!intel_private.i8xx_flush_page)
746 i830_cleanup();
747} 722}
748 723
749/* The chipset_flush interface needs to get data that has already been 724/* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
758 */ 733 */
759static void i830_chipset_flush(void) 734static void i830_chipset_flush(void)
760{ 735{
761 unsigned int *pg = intel_private.i8xx_flush_page; 736 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
737
738 /* Forcibly evict everything from the CPU write buffers.
739 * clflush appears to be insufficient.
740 */
741 wbinvd_on_all_cpus();
742
743 /* Now we've only seen documents for this magic bit on 855GM,
744 * we hope it exists for the other gen2 chipsets...
745 *
746 * Also works as advertised on my 845G.
747 */
748 writel(readl(intel_private.registers+I830_HIC) | (1<<31),
749 intel_private.registers+I830_HIC);
762 750
763 memset(pg, 0, 1024); 751 while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
752 if (time_after(jiffies, timeout))
753 break;
764 754
765 if (cpu_has_clflush) 755 udelay(50);
766 clflush_cache_range(pg, 1024); 756 }
767 else if (wbinvd_on_all_cpus() != 0)
768 printk(KERN_ERR "Timed out waiting for cache flush.\n");
769} 757}
770 758
771static void i830_write_entry(dma_addr_t addr, unsigned int entry, 759static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
849 837
850 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; 838 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
851 839
852 intel_i830_setup_flush();
853
854 return 0; 840 return 0;
855} 841}
856 842
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a2e603..bcbbc71febb7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
830 test_bit(IS_ANY_T1, &dev->flags))) { 830 test_bit(IS_ANY_T1, &dev->flags))) {
831 DEBUGP(4, dev, "Perform AUTOPPS\n"); 831 DEBUGP(4, dev, "Perform AUTOPPS\n");
832 set_bit(IS_AUTOPPS_ACT, &dev->flags); 832 set_bit(IS_AUTOPPS_ACT, &dev->flags);
833 ptsreq.protocol = ptsreq.protocol = 833 ptsreq.protocol = (0x01 << dev->proto);
834 (0x01 << dev->proto);
835 ptsreq.flags = 0x01; 834 ptsreq.flags = 0x01;
836 ptsreq.pts1 = 0x00; 835 ptsreq.pts1 = 0x00;
837 ptsreq.pts2 = 0x00; 836 ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4d691d..444155a305ae 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
78static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) 78static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
79{ 79{
80 struct ipw_dev *ipw = priv_data; 80 struct ipw_dev *ipw = priv_data;
81 struct resource *io_resource;
82 int ret; 81 int ret;
83 82
84 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 83 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
92 if (ret) 91 if (ret)
93 return ret; 92 return ret;
94 93
95 io_resource = request_region(p_dev->resource[0]->start, 94 if (!request_region(p_dev->resource[0]->start,
96 resource_size(p_dev->resource[0]), 95 resource_size(p_dev->resource[0]),
97 IPWIRELESS_PCCARD_NAME); 96 IPWIRELESS_PCCARD_NAME)) {
97 ret = -EBUSY;
98 goto exit;
99 }
98 100
99 p_dev->resource[2]->flags |= 101 p_dev->resource[2]->flags |=
100 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; 102 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
105 107
106 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); 108 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
107 if (ret != 0) 109 if (ret != 0)
108 goto exit2; 110 goto exit1;
109 111
110 ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; 112 ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
111 113
112 ipw->attr_memory = ioremap(p_dev->resource[2]->start, 114 ipw->common_memory = ioremap(p_dev->resource[2]->start,
113 resource_size(p_dev->resource[2])); 115 resource_size(p_dev->resource[2]));
114 request_mem_region(p_dev->resource[2]->start, 116 if (!request_mem_region(p_dev->resource[2]->start,
115 resource_size(p_dev->resource[2]), 117 resource_size(p_dev->resource[2]),
116 IPWIRELESS_PCCARD_NAME); 118 IPWIRELESS_PCCARD_NAME)) {
119 ret = -EBUSY;
120 goto exit2;
121 }
117 122
118 p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | 123 p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
119 WIN_ENABLE; 124 WIN_ENABLE;
120 p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ 125 p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
121 ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); 126 ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
122 if (ret != 0) 127 if (ret != 0)
123 goto exit2; 128 goto exit3;
124 129
125 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); 130 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
126 if (ret != 0) 131 if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
128 133
129 ipw->attr_memory = ioremap(p_dev->resource[3]->start, 134 ipw->attr_memory = ioremap(p_dev->resource[3]->start,
130 resource_size(p_dev->resource[3])); 135 resource_size(p_dev->resource[3]));
131 request_mem_region(p_dev->resource[3]->start, 136 if (!request_mem_region(p_dev->resource[3]->start,
132 resource_size(p_dev->resource[3]), 137 resource_size(p_dev->resource[3]),
133 IPWIRELESS_PCCARD_NAME); 138 IPWIRELESS_PCCARD_NAME)) {
139 ret = -EBUSY;
140 goto exit4;
141 }
134 142
135 return 0; 143 return 0;
136 144
145exit4:
146 iounmap(ipw->attr_memory);
137exit3: 147exit3:
148 release_mem_region(p_dev->resource[2]->start,
149 resource_size(p_dev->resource[2]));
138exit2: 150exit2:
139 if (ipw->common_memory) { 151 iounmap(ipw->common_memory);
140 release_mem_region(p_dev->resource[2]->start,
141 resource_size(p_dev->resource[2]));
142 iounmap(ipw->common_memory);
143 }
144exit1: 152exit1:
145 release_resource(io_resource); 153 release_region(p_dev->resource[0]->start,
154 resource_size(p_dev->resource[0]));
155exit:
146 pcmcia_disable_device(p_dev); 156 pcmcia_disable_device(p_dev);
147 return -1; 157 return ret;
148} 158}
149 159
150static int config_ipwireless(struct ipw_dev *ipw) 160static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
219 229
220static void release_ipwireless(struct ipw_dev *ipw) 230static void release_ipwireless(struct ipw_dev *ipw)
221{ 231{
232 release_region(ipw->link->resource[0]->start,
233 resource_size(ipw->link->resource[0]));
222 if (ipw->common_memory) { 234 if (ipw->common_memory) {
223 release_mem_region(ipw->link->resource[2]->start, 235 release_mem_region(ipw->link->resource[2]->start,
224 resource_size(ipw->link->resource[2])); 236 resource_size(ipw->link->resource[2]));
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index faf5a2c65926..1f46f1cd9225 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
364 tpm_protected_ordinal_duration[ordinal & 364 tpm_protected_ordinal_duration[ordinal &
365 TPM_PROTECTED_ORDINAL_MASK]; 365 TPM_PROTECTED_ORDINAL_MASK];
366 366
367 if (duration_idx != TPM_UNDEFINED) { 367 if (duration_idx != TPM_UNDEFINED)
368 duration = chip->vendor.duration[duration_idx]; 368 duration = chip->vendor.duration[duration_idx];
369 /* if duration is 0, it's because chip->vendor.duration wasn't */ 369 if (duration <= 0)
370 /* filled yet, so we set the lowest timeout just to give enough */
371 /* time for tpm_get_timeouts() to succeed */
372 return (duration <= 0 ? HZ : duration);
373 } else
374 return 2 * 60 * HZ; 370 return 2 * 60 * HZ;
371 else
372 return duration;
375} 373}
376EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); 374EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
377 375
@@ -577,11 +575,9 @@ duration:
577 if (rc) 575 if (rc)
578 return; 576 return;
579 577
580 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || 578 if (be32_to_cpu(tpm_cmd.header.out.return_code)
581 be32_to_cpu(tpm_cmd.header.out.length) 579 != 3 * sizeof(u32))
582 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
583 return; 580 return;
584
585 duration_cap = &tpm_cmd.params.getcap_out.cap.duration; 581 duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
586 chip->vendor.duration[TPM_SHORT] = 582 chip->vendor.duration[TPM_SHORT] =
587 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); 583 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -941,18 +937,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
941} 937}
942EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); 938EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
943 939
944ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
945 char *buf)
946{
947 struct tpm_chip *chip = dev_get_drvdata(dev);
948
949 return sprintf(buf, "%d %d %d\n",
950 jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
951 jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
952 jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
953}
954EXPORT_SYMBOL_GPL(tpm_show_timeouts);
955
956ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, 940ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
957 const char *buf, size_t count) 941 const char *buf, size_t count)
958{ 942{
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index d84ff772c26f..72ddb031b69a 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
56 char *); 56 char *);
57extern ssize_t tpm_show_temp_deactivated(struct device *, 57extern ssize_t tpm_show_temp_deactivated(struct device *,
58 struct device_attribute *attr, char *); 58 struct device_attribute *attr, char *);
59extern ssize_t tpm_show_timeouts(struct device *,
60 struct device_attribute *attr, char *);
61 59
62struct tpm_chip; 60struct tpm_chip;
63 61
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0d1d38e5f266..dd21df55689d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
376 NULL); 376 NULL);
377static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 377static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
378static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 378static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
379static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
380 379
381static struct attribute *tis_attrs[] = { 380static struct attribute *tis_attrs[] = {
382 &dev_attr_pubek.attr, 381 &dev_attr_pubek.attr,
@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = {
386 &dev_attr_owned.attr, 385 &dev_attr_owned.attr,
387 &dev_attr_temp_deactivated.attr, 386 &dev_attr_temp_deactivated.attr,
388 &dev_attr_caps.attr, 387 &dev_attr_caps.attr,
389 &dev_attr_cancel.attr, 388 &dev_attr_cancel.attr, NULL,
390 &dev_attr_timeouts.attr, NULL,
391}; 389};
392 390
393static struct attribute_group tis_attr_grp = { 391static struct attribute_group tis_attr_grp = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f6848a43..5cb4d09919d6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1919 1919
1920 ret = sysdev_driver_register(&cpu_sysdev_class, 1920 ret = sysdev_driver_register(&cpu_sysdev_class,
1921 &cpufreq_sysdev_driver); 1921 &cpufreq_sysdev_driver);
1922 if (ret)
1923 goto err_null_driver;
1922 1924
1923 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1925 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1924 int i; 1926 int i;
1925 ret = -ENODEV; 1927 ret = -ENODEV;
1926 1928
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1935 if (ret) { 1937 if (ret) {
1936 dprintk("no CPU initialized for driver %s\n", 1938 dprintk("no CPU initialized for driver %s\n",
1937 driver_data->name); 1939 driver_data->name);
1938 sysdev_driver_unregister(&cpu_sysdev_class, 1940 goto err_sysdev_unreg;
1939 &cpufreq_sysdev_driver);
1940
1941 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1942 cpufreq_driver = NULL;
1943 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1944 } 1941 }
1945 } 1942 }
1946 1943
1947 if (!ret) { 1944 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1948 register_hotcpu_notifier(&cpufreq_cpu_notifier); 1945 dprintk("driver %s up and running\n", driver_data->name);
1949 dprintk("driver %s up and running\n", driver_data->name); 1946 cpufreq_debug_enable_ratelimit();
1950 cpufreq_debug_enable_ratelimit();
1951 }
1952 1947
1948 return 0;
1949err_sysdev_unreg:
1950 sysdev_driver_unregister(&cpu_sysdev_class,
1951 &cpufreq_sysdev_driver);
1952err_null_driver:
1953 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1954 cpufreq_driver = NULL;
1955 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1953 return ret; 1956 return ret;
1954} 1957}
1955EXPORT_SYMBOL_GPL(cpufreq_register_driver); 1958EXPORT_SYMBOL_GPL(cpufreq_register_driver);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2a8528..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
164 * available. In that case we can't account for this and just 164 * available. In that case we can't account for this and just
165 * hope for the best. 165 * hope for the best.
166 */ 166 */
167 if ((vblrc > 0) && (abs(diff_ns) > 1000000)) 167 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
168 atomic_inc(&dev->_vblank_count[crtc]); 168 atomic_inc(&dev->_vblank_count[crtc]);
169 smp_mb__after_atomic_inc();
170 }
169 171
170 /* Invalidate all timestamps while vblank irq's are off. */ 172 /* Invalidate all timestamps while vblank irq's are off. */
171 clear_vblank_timestamps(dev, crtc); 173 clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
491 /* Dot clock in Hz: */ 493 /* Dot clock in Hz: */
492 dotclock = (u64) crtc->hwmode.clock * 1000; 494 dotclock = (u64) crtc->hwmode.clock * 1000;
493 495
496 /* Fields of interlaced scanout modes are only halve a frame duration.
497 * Double the dotclock to get halve the frame-/line-/pixelduration.
498 */
499 if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
500 dotclock *= 2;
501
494 /* Valid dotclock? */ 502 /* Valid dotclock? */
495 if (dotclock > 0) { 503 if (dotclock > 0) {
496 /* Convert scanline length in pixels and video dot clock to 504 /* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
603 return -EAGAIN; 611 return -EAGAIN;
604 } 612 }
605 613
606 /* Don't know yet how to handle interlaced or
607 * double scan modes. Just no-op for now.
608 */
609 if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
610 DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
611 return -ENOTSUPP;
612 }
613
614 /* Get current scanout position with system timestamp. 614 /* Get current scanout position with system timestamp.
615 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times 615 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
616 * if single query takes longer than max_error nanoseconds. 616 * if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
858 if (rc) { 858 if (rc) {
859 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; 859 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
860 vblanktimestamp(dev, crtc, tslot) = t_vblank; 860 vblanktimestamp(dev, crtc, tslot) = t_vblank;
861 smp_wmb();
862 } 861 }
863 862
863 smp_mb__before_atomic_inc();
864 atomic_add(diff, &dev->_vblank_count[crtc]); 864 atomic_add(diff, &dev->_vblank_count[crtc]);
865 smp_mb__after_atomic_inc();
865} 866}
866 867
867/** 868/**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1011 struct drm_file *file_priv) 1012 struct drm_file *file_priv)
1012{ 1013{
1013 struct drm_modeset_ctl *modeset = data; 1014 struct drm_modeset_ctl *modeset = data;
1014 int crtc, ret = 0; 1015 int ret = 0;
1016 unsigned int crtc;
1015 1017
1016 /* If drm_vblank_init() hasn't been called yet, just no-op */ 1018 /* If drm_vblank_init() hasn't been called yet, just no-op */
1017 if (!dev->num_crtcs) 1019 if (!dev->num_crtcs)
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1293 * e.g., due to spurious vblank interrupts. We need to 1295 * e.g., due to spurious vblank interrupts. We need to
1294 * ignore those for accounting. 1296 * ignore those for accounting.
1295 */ 1297 */
1296 if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { 1298 if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
1297 /* Store new timestamp in ringbuffer. */ 1299 /* Store new timestamp in ringbuffer. */
1298 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; 1300 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
1299 smp_wmb();
1300 1301
1301 /* Increment cooked vblank count. This also atomically commits 1302 /* Increment cooked vblank count. This also atomically commits
1302 * the timestamp computed above. 1303 * the timestamp computed above.
1303 */ 1304 */
1305 smp_mb__before_atomic_inc();
1304 atomic_inc(&dev->_vblank_count[crtc]); 1306 atomic_inc(&dev->_vblank_count[crtc]);
1307 smp_mb__after_atomic_inc();
1305 } else { 1308 } else {
1306 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1309 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1307 crtc, (int) diff_ns); 1310 crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1895 if (IS_GEN2(dev)) 1895 if (IS_GEN2(dev))
1896 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1896 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1897 1897
1898 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1899 * using 32bit addressing, overwriting memory if HWS is located
1900 * above 4GB.
1901 *
1902 * The documentation also mentions an issue with undefined
1903 * behaviour if any general state is accessed within a page above 4GB,
1904 * which also needs to be handled carefully.
1905 */
1906 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1907 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1908
1898 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1909 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1899 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1910 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1900 if (!dev_priv->regs) { 1911 if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..79a04fde69b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
184static bool 184static bool
185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
186{ 186{
187 int tile_width; 187 int tile_width, tile_height;
188 188
189 /* Linear is always fine */ 189 /* Linear is always fine */
190 if (tiling_mode == I915_TILING_NONE) 190 if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
215 } 215 }
216 } 216 }
217 217
218 if (IS_GEN2(dev) ||
219 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
220 tile_height = 32;
221 else
222 tile_height = 8;
223 /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
224 * number of tile rows. */
225 if (IS_GEN2(dev))
226 tile_height *= 2;
227
228 /* Size needs to be aligned to a full tile row */
229 if (size & (tile_height * stride - 1))
230 return false;
231
218 /* 965+ just needs multiples of tile width */ 232 /* 965+ just needs multiples of tile width */
219 if (INTEL_INFO(dev)->gen >= 4) { 233 if (INTEL_INFO(dev)->gen >= 4) {
220 if (stride & (tile_width - 1)) 234 if (stride & (tile_width - 1))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
316 struct drm_mode_config *mode_config = &dev->mode_config; 316 struct drm_mode_config *mode_config = &dev->mode_config;
317 struct intel_encoder *encoder; 317 struct intel_encoder *encoder;
318 318
319 DRM_DEBUG_KMS("running encoder hotplug functions\n");
320
319 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 321 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
320 if (encoder->hot_plug) 322 if (encoder->hot_plug)
321 encoder->hot_plug(encoder); 323 encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1649 } else { 1651 } else {
1650 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1652 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1651 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1653 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1652 hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; 1654 hotplug_mask |= SDE_AUX_MASK;
1653 I915_WRITE(FDI_RXA_IMR, 0);
1654 I915_WRITE(FDI_RXB_IMR, 0);
1655 } 1655 }
1656 1656
1657 dev_priv->pch_irq_mask = ~hotplug_mask; 1657 dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15d94c63918c..729d4233b763 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,17 +1553,7 @@
1553 1553
1554/* Backlight control */ 1554/* Backlight control */
1555#define BLC_PWM_CTL 0x61254 1555#define BLC_PWM_CTL 0x61254
1556#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
1557#define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1556#define BLC_PWM_CTL2 0x61250 /* 965+ only */
1558#define BLM_COMBINATION_MODE (1 << 30)
1559/*
1560 * This is the most significant 15 bits of the number of backlight cycles in a
1561 * complete cycle of the modulated backlight control.
1562 *
1563 * The actual value is this field multiplied by two.
1564 */
1565#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
1566#define BLM_LEGACY_MODE (1 << 16)
1567/* 1557/*
1568 * This is the number of cycles out of the backlight modulation cycle for which 1558 * This is the number of cycles out of the backlight modulation cycle for which
1569 * the backlight is on. 1559 * the backlight is on.
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b006536b3d2..e79b25bbee6c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1630 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 1630 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1631 1631
1632 wait_event(dev_priv->pending_flip_queue, 1632 wait_event(dev_priv->pending_flip_queue,
1633 atomic_read(&dev_priv->mm.wedged) ||
1633 atomic_read(&obj->pending_flip) == 0); 1634 atomic_read(&obj->pending_flip) == 0);
1634 1635
1635 /* Big Hammer, we also need to ensure that any pending 1636 /* Big Hammer, we also need to ensure that any pending
1636 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 1637 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1637 * current scanout is retired before unpinning the old 1638 * current scanout is retired before unpinning the old
1638 * framebuffer. 1639 * framebuffer.
1640 *
1641 * This should only fail upon a hung GPU, in which case we
1642 * can safely continue.
1639 */ 1643 */
1640 ret = i915_gem_object_flush_gpu(obj, false); 1644 ret = i915_gem_object_flush_gpu(obj, false);
1641 if (ret) { 1645 (void) ret;
1642 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1643 mutex_unlock(&dev->struct_mutex);
1644 return ret;
1645 }
1646 } 1646 }
1647 1647
1648 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 1648 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2045 atomic_read(&obj->pending_flip) == 0); 2045 atomic_read(&obj->pending_flip) == 0);
2046} 2046}
2047 2047
2048static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2049{
2050 struct drm_device *dev = crtc->dev;
2051 struct drm_mode_config *mode_config = &dev->mode_config;
2052 struct intel_encoder *encoder;
2053
2054 /*
2055 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2056 * must be driven by its own crtc; no sharing is possible.
2057 */
2058 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2059 if (encoder->base.crtc != crtc)
2060 continue;
2061
2062 switch (encoder->type) {
2063 case INTEL_OUTPUT_EDP:
2064 if (!intel_encoder_is_pch_edp(&encoder->base))
2065 return false;
2066 continue;
2067 }
2068 }
2069
2070 return true;
2071}
2072
2048static void ironlake_crtc_enable(struct drm_crtc *crtc) 2073static void ironlake_crtc_enable(struct drm_crtc *crtc)
2049{ 2074{
2050 struct drm_device *dev = crtc->dev; 2075 struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2053 int pipe = intel_crtc->pipe; 2078 int pipe = intel_crtc->pipe;
2054 int plane = intel_crtc->plane; 2079 int plane = intel_crtc->plane;
2055 u32 reg, temp; 2080 u32 reg, temp;
2081 bool is_pch_port = false;
2056 2082
2057 if (intel_crtc->active) 2083 if (intel_crtc->active)
2058 return; 2084 return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2066 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 2092 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2067 } 2093 }
2068 2094
2069 ironlake_fdi_enable(crtc); 2095 is_pch_port = intel_crtc_driving_pch(crtc);
2096
2097 if (is_pch_port)
2098 ironlake_fdi_enable(crtc);
2099 else {
2100 /* disable CPU FDI tx and PCH FDI rx */
2101 reg = FDI_TX_CTL(pipe);
2102 temp = I915_READ(reg);
2103 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2104 POSTING_READ(reg);
2105
2106 reg = FDI_RX_CTL(pipe);
2107 temp = I915_READ(reg);
2108 temp &= ~(0x7 << 16);
2109 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2110 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2111
2112 POSTING_READ(reg);
2113 udelay(100);
2114
2115 /* Ironlake workaround, disable clock pointer after downing FDI */
2116 if (HAS_PCH_IBX(dev))
2117 I915_WRITE(FDI_RX_CHICKEN(pipe),
2118 I915_READ(FDI_RX_CHICKEN(pipe) &
2119 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2120
2121 /* still set train pattern 1 */
2122 reg = FDI_TX_CTL(pipe);
2123 temp = I915_READ(reg);
2124 temp &= ~FDI_LINK_TRAIN_NONE;
2125 temp |= FDI_LINK_TRAIN_PATTERN_1;
2126 I915_WRITE(reg, temp);
2127
2128 reg = FDI_RX_CTL(pipe);
2129 temp = I915_READ(reg);
2130 if (HAS_PCH_CPT(dev)) {
2131 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2132 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2133 } else {
2134 temp &= ~FDI_LINK_TRAIN_NONE;
2135 temp |= FDI_LINK_TRAIN_PATTERN_1;
2136 }
2137 /* BPC in FDI rx is consistent with that in PIPECONF */
2138 temp &= ~(0x07 << 16);
2139 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2140 I915_WRITE(reg, temp);
2141
2142 POSTING_READ(reg);
2143 udelay(100);
2144 }
2070 2145
2071 /* Enable panel fitting for LVDS */ 2146 /* Enable panel fitting for LVDS */
2072 if (dev_priv->pch_pf_size && 2147 if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2100 intel_flush_display_plane(dev, plane); 2175 intel_flush_display_plane(dev, plane);
2101 } 2176 }
2102 2177
2178 /* Skip the PCH stuff if possible */
2179 if (!is_pch_port)
2180 goto done;
2181
2103 /* For PCH output, training FDI link */ 2182 /* For PCH output, training FDI link */
2104 if (IS_GEN6(dev)) 2183 if (IS_GEN6(dev))
2105 gen6_fdi_link_train(crtc); 2184 gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2184 I915_WRITE(reg, temp | TRANS_ENABLE); 2263 I915_WRITE(reg, temp | TRANS_ENABLE);
2185 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 2264 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2186 DRM_ERROR("failed to enable transcoder %d\n", pipe); 2265 DRM_ERROR("failed to enable transcoder %d\n", pipe);
2187 2266done:
2188 intel_crtc_load_lut(crtc); 2267 intel_crtc_load_lut(crtc);
2189 intel_update_fbc(dev); 2268 intel_update_fbc(dev);
2190 intel_crtc_update_cursor(crtc, true); 2269 intel_crtc_update_cursor(crtc, true);
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
6496 POSTING_READ(RSTDBYCTL); 6575 POSTING_READ(RSTDBYCTL);
6497 } 6576 }
6498 6577
6499 ironlake_disable_rc6(dev); 6578 ironlake_teardown_rc6(dev);
6500} 6579}
6501 6580
6502static int ironlake_setup_rc6(struct drm_device *dev) 6581static int ironlake_setup_rc6(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..d860abeda70f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
30 30
31#include "intel_drv.h" 31#include "intel_drv.h"
32 32
33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
34
35void 33void
36intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 34intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
37 struct drm_display_mode *adjusted_mode) 35 struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
112 dev_priv->pch_pf_size = (width << 16) | height; 110 dev_priv->pch_pf_size = (width << 16) | height;
113} 111}
114 112
115static int is_backlight_combination_mode(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118
119 if (INTEL_INFO(dev)->gen >= 4)
120 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
121
122 if (IS_GEN2(dev))
123 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
124
125 return 0;
126}
127
128static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 113static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
129{ 114{
130 u32 val; 115 u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
181 if (INTEL_INFO(dev)->gen < 4) 166 if (INTEL_INFO(dev)->gen < 4)
182 max &= ~1; 167 max &= ~1;
183 } 168 }
184
185 if (is_backlight_combination_mode(dev))
186 max *= 0xff;
187 } 169 }
188 170
189 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 171 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
201 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 183 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
202 if (IS_PINEVIEW(dev)) 184 if (IS_PINEVIEW(dev))
203 val >>= 1; 185 val >>= 1;
204
205 if (is_backlight_combination_mode(dev)){
206 u8 lbpc;
207
208 val &= ~1;
209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
210 val *= lbpc;
211 val >>= 1;
212 }
213 } 186 }
214 187
215 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 188 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
232 205
233 if (HAS_PCH_SPLIT(dev)) 206 if (HAS_PCH_SPLIT(dev))
234 return intel_pch_panel_set_backlight(dev, level); 207 return intel_pch_panel_set_backlight(dev, level);
235
236 if (is_backlight_combination_mode(dev)){
237 u32 max = intel_panel_get_max_backlight(dev);
238 u8 lpbc;
239
240 lpbc = level * 0xfe / max + 1;
241 level /= lpbc;
242 pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
243 }
244
245 tmp = I915_READ(BLC_PWM_CTL); 208 tmp = I915_READ(BLC_PWM_CTL);
246 if (IS_PINEVIEW(dev)) { 209 if (IS_PINEVIEW(dev)) {
247 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 210 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d38a4d9f9b0b..a52184007f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
49 DRM_ERROR("bo %p still attached to GEM object\n", bo); 49 DRM_ERROR("bo %p still attached to GEM object\n", bo);
50 50
51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL); 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
52 nouveau_vm_put(&nvbo->vma); 52 if (nvbo->vma.node) {
53 nouveau_vm_unmap(&nvbo->vma);
54 nouveau_vm_put(&nvbo->vma);
55 }
53 kfree(nvbo); 56 kfree(nvbo);
54} 57}
55 58
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 56deae5bf02e..93fa735c8c1a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3490,7 +3490,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
3490 track->num_texture = 16; 3490 track->num_texture = 16;
3491 track->maxy = 4096; 3491 track->maxy = 4096;
3492 track->separate_cube = 0; 3492 track->separate_cube = 0;
3493 track->aaresolve = true; 3493 track->aaresolve = false;
3494 track->aa.robj = NULL; 3494 track->aa.robj = NULL;
3495 } 3495 }
3496 3496
@@ -3801,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
3801 r100_mc_program(rdev); 3801 r100_mc_program(rdev);
3802 /* Resume clock */ 3802 /* Resume clock */
3803 r100_clock_startup(rdev); 3803 r100_clock_startup(rdev);
3804 /* Initialize GPU configuration (# pipes, ...) */
3805// r100_gpu_init(rdev);
3806 /* Initialize GART (initialize after TTM so we can allocate 3804 /* Initialize GART (initialize after TTM so we can allocate
3807 * memory through TTM but finalize after TTM) */ 3805 * memory through TTM but finalize after TTM) */
3808 r100_enable_bm(rdev); 3806 r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0e657095de7c..3e7e7f9eb781 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
971 max_fractional_feed_div = pll->max_frac_feedback_div; 971 max_fractional_feed_div = pll->max_frac_feedback_div;
972 } 972 }
973 973
974 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { 974 for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
975 uint32_t ref_div; 975 uint32_t ref_div;
976 976
977 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 977 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5bb5ba..cc44bdfec80f 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
113 u32 tiling_flags = 0; 113 u32 tiling_flags = 0;
114 int ret; 114 int ret;
115 int aligned_size, size; 115 int aligned_size, size;
116 int height = mode_cmd->height;
116 117
117 /* need to align pitch with crtc limits */ 118 /* need to align pitch with crtc limits */
118 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); 119 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
119 120
120 size = mode_cmd->pitch * mode_cmd->height; 121 if (rdev->family >= CHIP_R600)
122 height = ALIGN(mode_cmd->height, 8);
123 size = mode_cmd->pitch * height;
121 aligned_size = ALIGN(size, PAGE_SIZE); 124 aligned_size = ALIGN(size, PAGE_SIZE);
122 ret = radeon_gem_object_create(rdev, aligned_size, 0, 125 ret = radeon_gem_object_create(rdev, aligned_size, 0,
123 RADEON_GEM_DOMAIN_VRAM, 126 RADEON_GEM_DOMAIN_VRAM,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 773e484f1646..297bc9a7d6e6 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
238 will be called k8temp. 238 will be called k8temp.
239 239
240config SENSORS_K10TEMP 240config SENSORS_K10TEMP
241 tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor" 241 tristate "AMD Family 10h/11h/12h/14h temperature sensor"
242 depends on X86 && PCI 242 depends on X86 && PCI
243 help 243 help
244 If you say yes here you get support for the temperature 244 If you say yes here you get support for the temperature
245 sensor(s) inside your CPU. Supported are later revisions of 245 sensor(s) inside your CPU. Supported are later revisions of
246 the AMD Family 10h and all revisions of the AMD Family 11h 246 the AMD Family 10h and all revisions of the AMD Family 11h,
247 microarchitectures. 247 12h (Llano), and 14h (Brazos) microarchitectures.
248 248
249 This driver can also be built as a module. If so, the module 249 This driver can also be built as a module. If so, the module
250 will be called k10temp. 250 will be called k10temp.
@@ -455,13 +455,14 @@ config SENSORS_JZ4740
455 called jz4740-hwmon. 455 called jz4740-hwmon.
456 456
457config SENSORS_JC42 457config SENSORS_JC42
458 tristate "JEDEC JC42.4 compliant temperature sensors" 458 tristate "JEDEC JC42.4 compliant memory module temperature sensors"
459 depends on I2C 459 depends on I2C
460 help 460 help
461 If you say yes here you get support for Jedec JC42.4 compliant 461 If you say yes here, you get support for JEDEC JC42.4 compliant
462 temperature sensors. Support will include, but not be limited to, 462 temperature sensors, which are used on many DDR3 memory modules for
463 ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, 463 mobile devices and servers. Support will include, but not be limited
464 MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. 464 to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
465 MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
465 466
466 This driver can also be built as a module. If so, the module 467 This driver can also be built as a module. If so, the module
467 will be called jc42. 468 will be called jc42.
@@ -574,7 +575,7 @@ config SENSORS_LM85
574 help 575 help
575 If you say yes here you get support for National Semiconductor LM85 576 If you say yes here you get support for National Semiconductor LM85
576 sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, 577 sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
577 EMC6D101 and EMC6D102. 578 EMC6D101, EMC6D102, and EMC6D103.
578 579
579 This driver can also be built as a module. If so, the module 580 This driver can also be built as a module. If so, the module
580 will be called lm85. 581 will be called lm85.
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822aa9bbf..d46c0c758ddf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
242 { "ad7414", 0 }, 242 { "ad7414", 0 },
243 {} 243 {}
244}; 244};
245MODULE_DEVICE_TABLE(i2c, ad7414_id);
245 246
246static struct i2c_driver ad7414_driver = { 247static struct i2c_driver ad7414_driver = {
247 .driver = { 248 .driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843a2964..5cc3e3784b42 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
334 { "adt7411", 0 }, 334 { "adt7411", 0 },
335 { } 335 { }
336}; 336};
337MODULE_DEVICE_TABLE(i2c, adt7411_id);
337 338
338static struct i2c_driver adt7411_driver = { 339static struct i2c_driver adt7411_driver = {
339 .driver = { 340 .driver = {
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78c8dde..934991237061 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
53 53
54/* Configuration register defines */ 54/* Configuration register defines */
55#define JC42_CFG_CRIT_ONLY (1 << 2) 55#define JC42_CFG_CRIT_ONLY (1 << 2)
56#define JC42_CFG_TCRIT_LOCK (1 << 6)
57#define JC42_CFG_EVENT_LOCK (1 << 7)
56#define JC42_CFG_SHUTDOWN (1 << 8) 58#define JC42_CFG_SHUTDOWN (1 << 8)
57#define JC42_CFG_HYST_SHIFT 9 59#define JC42_CFG_HYST_SHIFT 9
58#define JC42_CFG_HYST_MASK 0x03 60#define JC42_CFG_HYST_MASK 0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
332{ 334{
333 struct i2c_client *client = to_i2c_client(dev); 335 struct i2c_client *client = to_i2c_client(dev);
334 struct jc42_data *data = i2c_get_clientdata(client); 336 struct jc42_data *data = i2c_get_clientdata(client);
335 long val; 337 unsigned long val;
336 int diff, hyst; 338 int diff, hyst;
337 int err; 339 int err;
338 int ret = count; 340 int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
380 382
381static DEVICE_ATTR(temp1_input, S_IRUGO, 383static DEVICE_ATTR(temp1_input, S_IRUGO,
382 show_temp_input, NULL); 384 show_temp_input, NULL);
383static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, 385static DEVICE_ATTR(temp1_crit, S_IRUGO,
384 show_temp_crit, set_temp_crit); 386 show_temp_crit, set_temp_crit);
385static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, 387static DEVICE_ATTR(temp1_min, S_IRUGO,
386 show_temp_min, set_temp_min); 388 show_temp_min, set_temp_min);
387static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, 389static DEVICE_ATTR(temp1_max, S_IRUGO,
388 show_temp_max, set_temp_max); 390 show_temp_max, set_temp_max);
389 391
390static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, 392static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
391 show_temp_crit_hyst, set_temp_crit_hyst); 393 show_temp_crit_hyst, set_temp_crit_hyst);
392static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, 394static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
393 show_temp_max_hyst, NULL); 395 show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
412 NULL 414 NULL
413}; 415};
414 416
417static mode_t jc42_attribute_mode(struct kobject *kobj,
418 struct attribute *attr, int index)
419{
420 struct device *dev = container_of(kobj, struct device, kobj);
421 struct i2c_client *client = to_i2c_client(dev);
422 struct jc42_data *data = i2c_get_clientdata(client);
423 unsigned int config = data->config;
424 bool readonly;
425
426 if (attr == &dev_attr_temp1_crit.attr)
427 readonly = config & JC42_CFG_TCRIT_LOCK;
428 else if (attr == &dev_attr_temp1_min.attr ||
429 attr == &dev_attr_temp1_max.attr)
430 readonly = config & JC42_CFG_EVENT_LOCK;
431 else if (attr == &dev_attr_temp1_crit_hyst.attr)
432 readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
433 else
434 readonly = true;
435
436 return S_IRUGO | (readonly ? 0 : S_IWUSR);
437}
438
415static const struct attribute_group jc42_group = { 439static const struct attribute_group jc42_group = {
416 .attrs = jc42_attributes, 440 .attrs = jc42_attributes,
441 .is_visible = jc42_attribute_mode,
417}; 442};
418 443
419/* Return 0 if detection is successful, -ENODEV otherwise */ 444/* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a2404cd3e..82bf65aa2968 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * k10temp.c - AMD Family 10h/11h processor hardware monitoring 2 * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
3 * 3 *
4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> 4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
5 * 5 *
@@ -25,7 +25,7 @@
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <asm/processor.h> 26#include <asm/processor.h>
27 27
28MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor"); 28MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31 31
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
208static const struct pci_device_id k10temp_id_table[] = { 208static const struct pci_device_id k10temp_id_table[] = {
209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, 210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
211 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
211 {} 212 {}
212}; 213};
213MODULE_DEVICE_TABLE(pci, k10temp_id_table); 214MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e229847f37a..d2cc28660816 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
41enum chips { 41enum chips {
42 any_chip, lm85b, lm85c, 42 any_chip, lm85b, lm85c,
43 adm1027, adt7463, adt7468, 43 adm1027, adt7463, adt7468,
44 emc6d100, emc6d102 44 emc6d100, emc6d102, emc6d103
45}; 45};
46 46
47/* The LM85 registers */ 47/* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
90#define LM85_VERSTEP_EMC6D100_A0 0x60 90#define LM85_VERSTEP_EMC6D100_A0 0x60
91#define LM85_VERSTEP_EMC6D100_A1 0x61 91#define LM85_VERSTEP_EMC6D100_A1 0x61
92#define LM85_VERSTEP_EMC6D102 0x65 92#define LM85_VERSTEP_EMC6D102 0x65
93#define LM85_VERSTEP_EMC6D103_A0 0x68
94#define LM85_VERSTEP_EMC6D103_A1 0x69
95#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
93 96
94#define LM85_REG_CONFIG 0x40 97#define LM85_REG_CONFIG 0x40
95 98
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
348 { "emc6d100", emc6d100 }, 351 { "emc6d100", emc6d100 },
349 { "emc6d101", emc6d100 }, 352 { "emc6d101", emc6d100 },
350 { "emc6d102", emc6d102 }, 353 { "emc6d102", emc6d102 },
354 { "emc6d103", emc6d103 },
351 { } 355 { }
352}; 356};
353MODULE_DEVICE_TABLE(i2c, lm85_id); 357MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
1250 case LM85_VERSTEP_EMC6D102: 1254 case LM85_VERSTEP_EMC6D102:
1251 type_name = "emc6d102"; 1255 type_name = "emc6d102";
1252 break; 1256 break;
1257 case LM85_VERSTEP_EMC6D103_A0:
1258 case LM85_VERSTEP_EMC6D103_A1:
1259 type_name = "emc6d103";
1260 break;
1261 /*
1262 * Registers apparently missing in EMC6D103S/EMC6D103:A2
1263 * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
1264 * (according to the data sheets), but used unconditionally
1265 * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
1266 * So skip EMC6D103S for now.
1267 case LM85_VERSTEP_EMC6D103S:
1268 type_name = "emc6d103s";
1269 break;
1270 */
1253 } 1271 }
1254 } else { 1272 } else {
1255 dev_dbg(&adapter->dev, 1273 dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
1283 case adt7468: 1301 case adt7468:
1284 case emc6d100: 1302 case emc6d100:
1285 case emc6d102: 1303 case emc6d102:
1304 case emc6d103:
1286 data->freq_map = adm1027_freq_map; 1305 data->freq_map = adm1027_freq_map;
1287 break; 1306 break;
1288 default: 1307 default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1468 /* More alarm bits */ 1487 /* More alarm bits */
1469 data->alarms |= lm85_read_value(client, 1488 data->alarms |= lm85_read_value(client,
1470 EMC6D100_REG_ALARM3) << 16; 1489 EMC6D100_REG_ALARM3) << 16;
1471 } else if (data->type == emc6d102) { 1490 } else if (data->type == emc6d102 || data->type == emc6d103) {
1472 /* Have to read LSB bits after the MSB ones because 1491 /* Have to read LSB bits after the MSB ones because
1473 the reading of the MSB bits has frozen the 1492 the reading of the MSB bits has frozen the
1474 LSBs (backward from the ADM1027). 1493 LSBs (backward from the ADM1027).
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b605ff3a1fa0..829a2a1029f7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -847,11 +847,15 @@ complete:
847 dev_err(dev->dev, "Arbitration lost\n"); 847 dev_err(dev->dev, "Arbitration lost\n");
848 err |= OMAP_I2C_STAT_AL; 848 err |= OMAP_I2C_STAT_AL;
849 } 849 }
850 /*
851 * ProDB0017052: Clear ARDY bit twice
852 */
850 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 853 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
851 OMAP_I2C_STAT_AL)) { 854 OMAP_I2C_STAT_AL)) {
852 omap_i2c_ack_stat(dev, stat & 855 omap_i2c_ack_stat(dev, stat &
853 (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | 856 (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
854 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); 857 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
858 OMAP_I2C_STAT_ARDY));
855 omap_i2c_complete_cmd(dev, err); 859 omap_i2c_complete_cmd(dev, err);
856 return IRQ_HANDLED; 860 return IRQ_HANDLED;
857 } 861 }
@@ -1137,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev)
1137 return 0; 1141 return 0;
1138} 1142}
1139 1143
1144#ifdef CONFIG_SUSPEND
1145static int omap_i2c_suspend(struct device *dev)
1146{
1147 if (!pm_runtime_suspended(dev))
1148 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
1149 dev->bus->pm->runtime_suspend(dev);
1150
1151 return 0;
1152}
1153
1154static int omap_i2c_resume(struct device *dev)
1155{
1156 if (!pm_runtime_suspended(dev))
1157 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
1158 dev->bus->pm->runtime_resume(dev);
1159
1160 return 0;
1161}
1162
1163static struct dev_pm_ops omap_i2c_pm_ops = {
1164 .suspend = omap_i2c_suspend,
1165 .resume = omap_i2c_resume,
1166};
1167#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1168#else
1169#define OMAP_I2C_PM_OPS NULL
1170#endif
1171
1140static struct platform_driver omap_i2c_driver = { 1172static struct platform_driver omap_i2c_driver = {
1141 .probe = omap_i2c_probe, 1173 .probe = omap_i2c_probe,
1142 .remove = omap_i2c_remove, 1174 .remove = omap_i2c_remove,
1143 .driver = { 1175 .driver = {
1144 .name = "omap_i2c", 1176 .name = "omap_i2c",
1145 .owner = THIS_MODULE, 1177 .owner = THIS_MODULE,
1178 .pm = OMAP_I2C_PM_OPS,
1146 }, 1179 },
1147}; 1180};
1148 1181
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be451d326..266135ddf7fa 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
942 adap->owner = THIS_MODULE; 942 adap->owner = THIS_MODULE;
943 /* DDC class but actually often used for more generic I2C */ 943 /* DDC class but actually often used for more generic I2C */
944 adap->class = I2C_CLASS_DDC; 944 adap->class = I2C_CLASS_DDC;
945 strncpy(adap->name, "ST Microelectronics DDC I2C adapter", 945 strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
946 sizeof(adap->name)); 946 sizeof(adap->name));
947 adap->nr = bus_nr; 947 adap->nr = bus_nr;
948 adap->algo = &stu300_algo; 948 adap->algo = &stu300_algo;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1fa091e05690..4a5c4a44ffb1 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -62,6 +62,7 @@
62#include <linux/notifier.h> 62#include <linux/notifier.h>
63#include <linux/cpu.h> 63#include <linux/cpu.h>
64#include <asm/mwait.h> 64#include <asm/mwait.h>
65#include <asm/msr.h>
65 66
66#define INTEL_IDLE_VERSION "0.4" 67#define INTEL_IDLE_VERSION "0.4"
67#define PREFIX "intel_idle: " 68#define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
85static struct cpuidle_state *cpuidle_state_table; 86static struct cpuidle_state *cpuidle_state_table;
86 87
87/* 88/*
89 * Hardware C-state auto-demotion may not always be optimal.
90 * Indicate which enable bits to clear here.
91 */
92static unsigned long long auto_demotion_disable_flags;
93
94/*
88 * Set this flag for states where the HW flushes the TLB for us 95 * Set this flag for states where the HW flushes the TLB for us
89 * and so we don't need cross-calls to keep it consistent. 96 * and so we don't need cross-calls to keep it consistent.
90 * If this flag is set, SW flushes the TLB, so even if the 97 * If this flag is set, SW flushes the TLB, so even if the
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
281 .notifier_call = setup_broadcast_cpuhp_notify, 288 .notifier_call = setup_broadcast_cpuhp_notify,
282}; 289};
283 290
291static void auto_demotion_disable(void *dummy)
292{
293 unsigned long long msr_bits;
294
295 rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
296 msr_bits &= ~auto_demotion_disable_flags;
297 wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
298}
299
284/* 300/*
285 * intel_idle_probe() 301 * intel_idle_probe()
286 */ 302 */
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
324 case 0x25: /* Westmere */ 340 case 0x25: /* Westmere */
325 case 0x2C: /* Westmere */ 341 case 0x2C: /* Westmere */
326 cpuidle_state_table = nehalem_cstates; 342 cpuidle_state_table = nehalem_cstates;
343 auto_demotion_disable_flags =
344 (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
327 break; 345 break;
328 346
329 case 0x1C: /* 28 - Atom Processor */ 347 case 0x1C: /* 28 - Atom Processor */
348 cpuidle_state_table = atom_cstates;
349 break;
350
330 case 0x26: /* 38 - Lincroft Atom Processor */ 351 case 0x26: /* 38 - Lincroft Atom Processor */
331 cpuidle_state_table = atom_cstates; 352 cpuidle_state_table = atom_cstates;
353 auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
332 break; 354 break;
333 355
334 case 0x2A: /* SNB */ 356 case 0x2A: /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
436 return -EIO; 458 return -EIO;
437 } 459 }
438 } 460 }
461 if (auto_demotion_disable_flags)
462 smp_call_function(auto_demotion_disable, NULL, 1);
439 463
440 return 0; 464 return 0;
441} 465}
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8aba0ba57de5..2d749937a969 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -193,10 +193,11 @@ static int addr4_resolve(struct sockaddr_in *src_in,
193 fl.nl_u.ip4_u.saddr = src_ip; 193 fl.nl_u.ip4_u.saddr = src_ip;
194 fl.oif = addr->bound_dev_if; 194 fl.oif = addr->bound_dev_if;
195 195
196 ret = ip_route_output_key(&init_net, &rt, &fl); 196 rt = ip_route_output_key(&init_net, &fl);
197 if (ret) 197 if (IS_ERR(rt)) {
198 ret = PTR_ERR(rt);
198 goto out; 199 goto out;
199 200 }
200 src_in->sin_family = AF_INET; 201 src_in->sin_family = AF_INET;
201 src_in->sin_addr.s_addr = rt->rt_src; 202 src_in->sin_addr.s_addr = rt->rt_src;
202 203
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d02dcc6e5963..e0ccbc53fbcc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -354,7 +354,8 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
354 } 354 }
355 }; 355 };
356 356
357 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) 357 rt = ip_route_output_flow(&init_net, &fl, NULL);
358 if (IS_ERR(rt))
358 return NULL; 359 return NULL;
359 return rt; 360 return rt;
360} 361}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8b00e6c46f01..77b0eef2aad9 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -331,7 +331,8 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
331 } 331 }
332 }; 332 };
333 333
334 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) 334 rt = ip_route_output_flow(&init_net, &fl, NULL);
335 if (IS_ERR(rt))
335 return NULL; 336 return NULL;
336 return rt; 337 return rt;
337} 338}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index ec3aa11c36cb..e81599cb1fe6 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1112,7 +1112,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1112 1112
1113 memset(&fl, 0, sizeof fl); 1113 memset(&fl, 0, sizeof fl);
1114 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1114 fl.nl_u.ip4_u.daddr = htonl(dst_ip);
1115 if (ip_route_output_key(&init_net, &rt, &fl)) { 1115 rt = ip_route_output_key(&init_net, &fl);
1116 if (IS_ERR(rt)) {
1116 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", 1117 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
1117 __func__, dst_ip); 1118 __func__, dst_ip);
1118 return rc; 1119 return rc;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc933ec..5b8f59d6c3e8 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
360 event->owner = owner; 360 event->owner = owner;
361 361
362 list_add_tail(&event->node, &gameport_event_list); 362 list_add_tail(&event->node, &gameport_event_list);
363 schedule_work(&gameport_event_work); 363 queue_work(system_long_wq, &gameport_event_work);
364 364
365out: 365out:
366 spin_unlock_irqrestore(&gameport_event_lock, flags); 366 spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index ac471b77c18e..99ce9032d08c 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -71,8 +71,9 @@ struct tegra_kbc {
71 spinlock_t lock; 71 spinlock_t lock;
72 unsigned int repoll_dly; 72 unsigned int repoll_dly;
73 unsigned long cp_dly_jiffies; 73 unsigned long cp_dly_jiffies;
74 bool use_fn_map;
74 const struct tegra_kbc_platform_data *pdata; 75 const struct tegra_kbc_platform_data *pdata;
75 unsigned short keycode[KBC_MAX_KEY]; 76 unsigned short keycode[KBC_MAX_KEY * 2];
76 unsigned short current_keys[KBC_MAX_KPENT]; 77 unsigned short current_keys[KBC_MAX_KPENT];
77 unsigned int num_pressed_keys; 78 unsigned int num_pressed_keys;
78 struct timer_list timer; 79 struct timer_list timer;
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
178 KEY(15, 5, KEY_F2), 179 KEY(15, 5, KEY_F2),
179 KEY(15, 6, KEY_CAPSLOCK), 180 KEY(15, 6, KEY_CAPSLOCK),
180 KEY(15, 7, KEY_F6), 181 KEY(15, 7, KEY_F6),
182
183 /* Software Handled Function Keys */
184 KEY(20, 0, KEY_KP7),
185
186 KEY(21, 0, KEY_KP9),
187 KEY(21, 1, KEY_KP8),
188 KEY(21, 2, KEY_KP4),
189 KEY(21, 4, KEY_KP1),
190
191 KEY(22, 1, KEY_KPSLASH),
192 KEY(22, 2, KEY_KP6),
193 KEY(22, 3, KEY_KP5),
194 KEY(22, 4, KEY_KP3),
195 KEY(22, 5, KEY_KP2),
196 KEY(22, 7, KEY_KP0),
197
198 KEY(27, 1, KEY_KPASTERISK),
199 KEY(27, 3, KEY_KPMINUS),
200 KEY(27, 4, KEY_KPPLUS),
201 KEY(27, 5, KEY_KPDOT),
202
203 KEY(28, 5, KEY_VOLUMEUP),
204
205 KEY(29, 3, KEY_HOME),
206 KEY(29, 4, KEY_END),
207 KEY(29, 5, KEY_BRIGHTNESSDOWN),
208 KEY(29, 6, KEY_VOLUMEDOWN),
209 KEY(29, 7, KEY_BRIGHTNESSUP),
210
211 KEY(30, 0, KEY_NUMLOCK),
212 KEY(30, 1, KEY_SCROLLLOCK),
213 KEY(30, 2, KEY_MUTE),
214
215 KEY(31, 4, KEY_HELP),
181}; 216};
182 217
183static const struct matrix_keymap_data tegra_kbc_default_keymap_data = { 218static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
224 unsigned int i; 259 unsigned int i;
225 unsigned int num_down = 0; 260 unsigned int num_down = 0;
226 unsigned long flags; 261 unsigned long flags;
262 bool fn_keypress = false;
227 263
228 spin_lock_irqsave(&kbc->lock, flags); 264 spin_lock_irqsave(&kbc->lock, flags);
229 for (i = 0; i < KBC_MAX_KPENT; i++) { 265 for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
237 MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT); 273 MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
238 274
239 scancodes[num_down] = scancode; 275 scancodes[num_down] = scancode;
240 keycodes[num_down++] = kbc->keycode[scancode]; 276 keycodes[num_down] = kbc->keycode[scancode];
277 /* If driver uses Fn map, do not report the Fn key. */
278 if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
279 fn_keypress = true;
280 else
281 num_down++;
241 } 282 }
242 283
243 val >>= 8; 284 val >>= 8;
244 } 285 }
286
287 /*
288 * If the platform uses Fn keymaps, translate keys on a Fn keypress.
289 * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
290 */
291 if (fn_keypress) {
292 for (i = 0; i < num_down; i++) {
293 scancodes[i] += KBC_MAX_KEY;
294 keycodes[i] = kbc->keycode[scancodes[i]];
295 }
296 }
297
245 spin_unlock_irqrestore(&kbc->lock, flags); 298 spin_unlock_irqrestore(&kbc->lock, flags);
246 299
247 tegra_kbc_report_released_keys(kbc->idev, 300 tegra_kbc_report_released_keys(kbc->idev,
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
594 647
595 input_dev->keycode = kbc->keycode; 648 input_dev->keycode = kbc->keycode;
596 input_dev->keycodesize = sizeof(kbc->keycode[0]); 649 input_dev->keycodesize = sizeof(kbc->keycode[0]);
597 input_dev->keycodemax = ARRAY_SIZE(kbc->keycode); 650 input_dev->keycodemax = KBC_MAX_KEY;
651 if (pdata->use_fn_map)
652 input_dev->keycodemax *= 2;
598 653
654 kbc->use_fn_map = pdata->use_fn_map;
599 keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data; 655 keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
600 matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT, 656 matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
601 input_dev->keycode, input_dev->keybit); 657 input_dev->keycode, input_dev->keybit);
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d042a72c..7453938bf5ef 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
51#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) 51#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
52#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) 52#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
53#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) 53#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
54
55/*
56 * The following describes response for the 0x0c query.
57 *
58 * byte mask name meaning
59 * ---- ---- ------- ------------
60 * 1 0x01 adjustable threshold capacitive button sensitivity
61 * can be adjusted
62 * 1 0x02 report max query 0x0d gives max coord reported
63 * 1 0x04 clearpad sensor is ClearPad product
64 * 1 0x08 advanced gesture not particularly meaningful
65 * 1 0x10 clickpad bit 0 1-button ClickPad
66 * 1 0x60 multifinger mode identifies firmware finger counting
67 * (not reporting!) algorithm.
68 * Not particularly meaningful
69 * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
70 * 2 0x01 clickpad bit 1 2-button ClickPad
71 * 2 0x02 deluxe LED controls touchpad support LED commands
72 * ala multimedia control bar
73 * 2 0x04 reduced filtering firmware does less filtering on
74 * position data, driver should watch
75 * for noise.
76 */
54#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ 77#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
55#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ 78#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
56#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) 79#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 7c38d1fbabf2..ba70058e2be3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
299 event->owner = owner; 299 event->owner = owner;
300 300
301 list_add_tail(&event->node, &serio_event_list); 301 list_add_tail(&event->node, &serio_event_list);
302 schedule_work(&serio_event_work); 302 queue_work(system_long_wq, &serio_event_work);
303 303
304out: 304out:
305 spin_unlock_irqrestore(&serio_event_lock, flags); 305 spin_unlock_irqrestore(&serio_event_lock, flags);
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798442fa..7bd5baa547be 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
62 stream interface. 62 stream interface.
63 If synchronous service was requested, then function 63 If synchronous service was requested, then function
64 does return amount of data written to stream. 64 does return amount of data written to stream.
65 'final' does indicate that pice of data to be written is 65 'final' does indicate that piece of data to be written is
66 final part of frame (necessary only by structured datatransfer) 66 final part of frame (necessary only by structured datatransfer)
67 return 0 if zero lengh packet was written 67 return 0 if zero lengh packet was written
68 return -1 if stream is full 68 return -1 if stream is full
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 049eaf12aaab..1f23e048f077 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
134{ 134{
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
136 136
137 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) 137 if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
138 return; 138 return;
139 139
140 spin_lock(&receiving_list_lock); 140 spin_lock(&receiving_list_lock);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
216 216
217 if (md_check_no_bitmap(mddev)) 217 if (md_check_no_bitmap(mddev))
218 return -EINVAL; 218 return -EINVAL;
219 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
220 conf = linear_conf(mddev, mddev->raid_disks); 219 conf = linear_conf(mddev, mddev->raid_disks);
221 220
222 if (!conf) 221 if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c1..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
553{ 553{
554 mddev_t *mddev, *new = NULL; 554 mddev_t *mddev, *new = NULL;
555 555
556 if (unit && MAJOR(unit) != MD_MAJOR)
557 unit &= ~((1<<MdpMinorShift)-1);
558
556 retry: 559 retry:
557 spin_lock(&all_mddevs_lock); 560 spin_lock(&all_mddevs_lock);
558 561
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
4138 } 4141 }
4139 4142
4140 mddev->array_sectors = sectors; 4143 mddev->array_sectors = sectors;
4141 set_capacity(mddev->gendisk, mddev->array_sectors); 4144 if (mddev->pers) {
4142 if (mddev->pers) 4145 set_capacity(mddev->gendisk, mddev->array_sectors);
4143 revalidate_disk(mddev->gendisk); 4146 revalidate_disk(mddev->gendisk);
4144 4147 }
4145 return len; 4148 return len;
4146} 4149}
4147 4150
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
4624 } 4627 }
4625 set_capacity(mddev->gendisk, mddev->array_sectors); 4628 set_capacity(mddev->gendisk, mddev->array_sectors);
4626 revalidate_disk(mddev->gendisk); 4629 revalidate_disk(mddev->gendisk);
4630 mddev->changed = 1;
4627 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4631 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4628out: 4632out:
4629 return err; 4633 return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
4712 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4716 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4713 mddev->recovery = 0; 4717 mddev->recovery = 0;
4714 mddev->in_sync = 0; 4718 mddev->in_sync = 0;
4719 mddev->changed = 0;
4715 mddev->degraded = 0; 4720 mddev->degraded = 0;
4716 mddev->safemode = 0; 4721 mddev->safemode = 0;
4717 mddev->bitmap_info.offset = 0; 4722 mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4827 4832
4828 set_capacity(disk, 0); 4833 set_capacity(disk, 0);
4829 mutex_unlock(&mddev->open_mutex); 4834 mutex_unlock(&mddev->open_mutex);
4835 mddev->changed = 1;
4830 revalidate_disk(disk); 4836 revalidate_disk(disk);
4831 4837
4832 if (mddev->ro) 4838 if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
6011 atomic_inc(&mddev->openers); 6017 atomic_inc(&mddev->openers);
6012 mutex_unlock(&mddev->open_mutex); 6018 mutex_unlock(&mddev->open_mutex);
6013 6019
6014 check_disk_size_change(mddev->gendisk, bdev); 6020 check_disk_change(bdev);
6015 out: 6021 out:
6016 return err; 6022 return err;
6017} 6023}
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
6026 6032
6027 return 0; 6033 return 0;
6028} 6034}
6035
6036static int md_media_changed(struct gendisk *disk)
6037{
6038 mddev_t *mddev = disk->private_data;
6039
6040 return mddev->changed;
6041}
6042
6043static int md_revalidate(struct gendisk *disk)
6044{
6045 mddev_t *mddev = disk->private_data;
6046
6047 mddev->changed = 0;
6048 return 0;
6049}
6029static const struct block_device_operations md_fops = 6050static const struct block_device_operations md_fops =
6030{ 6051{
6031 .owner = THIS_MODULE, 6052 .owner = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
6036 .compat_ioctl = md_compat_ioctl, 6057 .compat_ioctl = md_compat_ioctl,
6037#endif 6058#endif
6038 .getgeo = md_getgeo, 6059 .getgeo = md_getgeo,
6060 .media_changed = md_media_changed,
6061 .revalidate_disk= md_revalidate,
6039}; 6062};
6040 6063
6041static int md_thread(void * arg) 6064static int md_thread(void * arg)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b8593b2a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
274 atomic_t active; /* general refcount */ 274 atomic_t active; /* general refcount */
275 atomic_t openers; /* number of active opens */ 275 atomic_t openers; /* number of active opens */
276 276
277 int changed; /* True if we might need to
278 * reread partition info */
277 int degraded; /* whether md should consider 279 int degraded; /* whether md should consider
278 * adding a spare 280 * adding a spare
279 */ 281 */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
435 * bookkeeping area. [whatever we allocate in multipath_run(), 435 * bookkeeping area. [whatever we allocate in multipath_run(),
436 * should be freed in multipath_stop()] 436 * should be freed in multipath_stop()]
437 */ 437 */
438 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
439 438
440 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); 439 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
441 mddev->private = conf; 440 mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855edb..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
361 if (md_check_no_bitmap(mddev)) 361 if (md_check_no_bitmap(mddev))
362 return -EINVAL; 362 return -EINVAL;
363 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 363 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
364 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
365 364
366 /* if private is not null, we are here after takeover */ 365 /* if private is not null, we are here after takeover */
367 if (mddev->private == NULL) { 366 if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
670 mddev->new_layout = 0; 669 mddev->new_layout = 0;
671 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ 670 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
672 mddev->delta_disks = 1 - mddev->raid_disks; 671 mddev->delta_disks = 1 - mddev->raid_disks;
672 mddev->raid_disks = 1;
673 /* make sure it will be not marked as dirty */ 673 /* make sure it will be not marked as dirty */
674 mddev->recovery_cp = MaxSector; 674 mddev->recovery_cp = MaxSector;
675 675
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
593 if (conf->pending_bio_list.head) { 593 if (conf->pending_bio_list.head) {
594 struct bio *bio; 594 struct bio *bio;
595 bio = bio_list_get(&conf->pending_bio_list); 595 bio = bio_list_get(&conf->pending_bio_list);
596 /* Only take the spinlock to quiet a warning */
597 spin_lock(conf->mddev->queue->queue_lock);
596 blk_remove_plug(conf->mddev->queue); 598 blk_remove_plug(conf->mddev->queue);
599 spin_unlock(conf->mddev->queue->queue_lock);
597 spin_unlock_irq(&conf->device_lock); 600 spin_unlock_irq(&conf->device_lock);
598 /* flush any pending bitmap writes to 601 /* flush any pending bitmap writes to
599 * disk before proceeding w/ I/O */ 602 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
959 atomic_inc(&r1_bio->remaining); 962 atomic_inc(&r1_bio->remaining);
960 spin_lock_irqsave(&conf->device_lock, flags); 963 spin_lock_irqsave(&conf->device_lock, flags);
961 bio_list_add(&conf->pending_bio_list, mbio); 964 bio_list_add(&conf->pending_bio_list, mbio);
962 blk_plug_device(mddev->queue); 965 blk_plug_device_unlocked(mddev->queue);
963 spin_unlock_irqrestore(&conf->device_lock, flags); 966 spin_unlock_irqrestore(&conf->device_lock, flags);
964 } 967 }
965 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); 968 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
2021 if (IS_ERR(conf)) 2024 if (IS_ERR(conf))
2022 return PTR_ERR(conf); 2025 return PTR_ERR(conf);
2023 2026
2024 mddev->queue->queue_lock = &conf->device_lock;
2025 list_for_each_entry(rdev, &mddev->disks, same_set) { 2027 list_for_each_entry(rdev, &mddev->disks, same_set) {
2026 disk_stack_limits(mddev->gendisk, rdev->bdev, 2028 disk_stack_limits(mddev->gendisk, rdev->bdev,
2027 rdev->data_offset << 9); 2029 rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
662 if (conf->pending_bio_list.head) { 662 if (conf->pending_bio_list.head) {
663 struct bio *bio; 663 struct bio *bio;
664 bio = bio_list_get(&conf->pending_bio_list); 664 bio = bio_list_get(&conf->pending_bio_list);
665 /* Spinlock only taken to quiet a warning */
666 spin_lock(conf->mddev->queue->queue_lock);
665 blk_remove_plug(conf->mddev->queue); 667 blk_remove_plug(conf->mddev->queue);
668 spin_unlock(conf->mddev->queue->queue_lock);
666 spin_unlock_irq(&conf->device_lock); 669 spin_unlock_irq(&conf->device_lock);
667 /* flush any pending bitmap writes to disk 670 /* flush any pending bitmap writes to disk
668 * before proceeding w/ I/O */ 671 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
971 atomic_inc(&r10_bio->remaining); 974 atomic_inc(&r10_bio->remaining);
972 spin_lock_irqsave(&conf->device_lock, flags); 975 spin_lock_irqsave(&conf->device_lock, flags);
973 bio_list_add(&conf->pending_bio_list, mbio); 976 bio_list_add(&conf->pending_bio_list, mbio);
974 blk_plug_device(mddev->queue); 977 blk_plug_device_unlocked(mddev->queue);
975 spin_unlock_irqrestore(&conf->device_lock, flags); 978 spin_unlock_irqrestore(&conf->device_lock, flags);
976 } 979 }
977 980
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
2304 if (!conf) 2307 if (!conf)
2305 goto out; 2308 goto out;
2306 2309
2307 mddev->queue->queue_lock = &conf->device_lock;
2308
2309 mddev->thread = conf->thread; 2310 mddev->thread = conf->thread;
2310 conf->thread = NULL; 2311 conf->thread = NULL;
2311 2312
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
5204 5204
5205 mddev->queue->backing_dev_info.congested_data = mddev; 5205 mddev->queue->backing_dev_info.congested_data = mddev;
5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5207 mddev->queue->queue_lock = &conf->device_lock;
5208 mddev->queue->unplug_fn = raid5_unplug_queue; 5207 mddev->queue->unplug_fn = raid5_unplug_queue;
5209 5208
5210 chunk_size = mddev->chunk_sectors << 9; 5209 chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 6a1f94042612..c45e6305b26f 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
143 unsigned long flags; 143 unsigned long flags;
144 struct asic3 *asic; 144 struct asic3 *asic;
145 145
146 desc->chip->ack(irq); 146 desc->irq_data.chip->irq_ack(&desc->irq_data);
147 147
148 asic = desc->handler_data; 148 asic = get_irq_data(irq);
149 149
150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
151 u32 status; 151 u32 status;
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d215c7..fdd8a1b8bc67 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
118 118
119 /* Voice codec interface client */ 119 /* Voice codec interface client */
120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; 120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
121 cell->name = "davinci_vcif"; 121 cell->name = "davinci-vcif";
122 cell->driver_data = davinci_vc; 122 cell->driver_data = davinci_vc;
123 123
124 /* Voice codec CQ93VC client */ 124 /* Voice codec CQ93VC client */
125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; 125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
126 cell->name = "cq93vc"; 126 cell->name = "cq93vc-codec";
127 cell->driver_data = davinci_vc; 127 cell->driver_data = davinci_vc;
128 128
129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, 129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 627cf577b16d..e9018d1394ee 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
150static inline int __tps6586x_writes(struct i2c_client *client, int reg, 150static inline int __tps6586x_writes(struct i2c_client *client, int reg,
151 int len, uint8_t *val) 151 int len, uint8_t *val)
152{ 152{
153 int ret; 153 int ret, i;
154 154
155 ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); 155 for (i = 0; i < len; i++) {
156 if (ret < 0) { 156 ret = __tps6586x_write(client, reg + i, *(val + i));
157 dev_err(&client->dev, "failed writings to 0x%02x\n", reg); 157 if (ret < 0)
158 return ret; 158 return ret;
159 } 159 }
160 160
161 return 0; 161 return 0;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb414a78a..92b85e28a15e 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
385 idev->close = ucb1x00_ts_close; 385 idev->close = ucb1x00_ts_close;
386 386
387 __set_bit(EV_ABS, idev->evbit); 387 __set_bit(EV_ABS, idev->evbit);
388 __set_bit(ABS_X, idev->absbit);
389 __set_bit(ABS_Y, idev->absbit);
390 __set_bit(ABS_PRESSURE, idev->absbit);
391 388
392 input_set_drvdata(idev, ts); 389 input_set_drvdata(idev, ts);
393 390
391 ucb1x00_adc_enable(ts->ucb);
392 ts->x_res = ucb1x00_ts_read_xres(ts);
393 ts->y_res = ucb1x00_ts_read_yres(ts);
394 ucb1x00_adc_disable(ts->ucb);
395
396 input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
397 input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
398 input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
399
394 err = input_register_device(idev); 400 err = input_register_device(idev);
395 if (err) 401 if (err)
396 goto fail; 402 goto fail;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 41233c7fa581..f4016a075fd6 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
246 struct wm8994 *wm8994 = dev_get_drvdata(dev); 246 struct wm8994 *wm8994 = dev_get_drvdata(dev);
247 int ret; 247 int ret;
248 248
249 /* Don't actually go through with the suspend if the CODEC is
250 * still active (eg, for audio passthrough from CP. */
251 ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
252 if (ret < 0) {
253 dev_err(dev, "Failed to read power status: %d\n", ret);
254 } else if (ret & WM8994_VMID_SEL_MASK) {
255 dev_dbg(dev, "CODEC still active, ignoring suspend\n");
256 return 0;
257 }
258
249 /* GPIO configuration state is saved here since we may be configuring 259 /* GPIO configuration state is saved here since we may be configuring
250 * the GPIO alternate functions even if we're not using the gpiolib 260 * the GPIO alternate functions even if we're not using the gpiolib
251 * driver for them. 261 * driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
261 if (ret < 0) 271 if (ret < 0)
262 dev_err(dev, "Failed to save LDO registers: %d\n", ret); 272 dev_err(dev, "Failed to save LDO registers: %d\n", ret);
263 273
274 wm8994->suspended = true;
275
264 ret = regulator_bulk_disable(wm8994->num_supplies, 276 ret = regulator_bulk_disable(wm8994->num_supplies,
265 wm8994->supplies); 277 wm8994->supplies);
266 if (ret != 0) { 278 if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
276 struct wm8994 *wm8994 = dev_get_drvdata(dev); 288 struct wm8994 *wm8994 = dev_get_drvdata(dev);
277 int ret; 289 int ret;
278 290
291 /* We may have lied to the PM core about suspending */
292 if (!wm8994->suspended)
293 return 0;
294
279 ret = regulator_bulk_enable(wm8994->num_supplies, 295 ret = regulator_bulk_enable(wm8994->num_supplies,
280 wm8994->supplies); 296 wm8994->supplies);
281 if (ret != 0) { 297 if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
298 if (ret < 0) 314 if (ret < 0)
299 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); 315 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
300 316
317 wm8994->suspended = false;
318
301 return 0; 319 return 0;
302} 320}
303#endif 321#endif
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f4b39274308a..925c25c295f0 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2008,6 +2008,15 @@ config BCM63XX_ENET
2008 This driver supports the ethernet MACs in the Broadcom 63xx 2008 This driver supports the ethernet MACs in the Broadcom 63xx
2009 MIPS chipset family (BCM63XX). 2009 MIPS chipset family (BCM63XX).
2010 2010
2011config FTMAC100
2012 tristate "Faraday FTMAC100 10/100 Ethernet support"
2013 depends on ARM
2014 select MII
2015 help
2016 This driver supports the FTMAC100 10/100 Ethernet controller
2017 from Faraday. It is used on Faraday A320, Andes AG101 and some
2018 other ARM/NDS32 SoC's.
2019
2011source "drivers/net/fs_enet/Kconfig" 2020source "drivers/net/fs_enet/Kconfig"
2012 2021
2013source "drivers/net/octeon/Kconfig" 2022source "drivers/net/octeon/Kconfig"
@@ -2100,6 +2109,7 @@ config E1000
2100config E1000E 2109config E1000E
2101 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" 2110 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
2102 depends on PCI && (!SPARC32 || BROKEN) 2111 depends on PCI && (!SPARC32 || BROKEN)
2112 select CRC32
2103 ---help--- 2113 ---help---
2104 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit 2114 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
2105 ethernet family of adapters. For PCI or PCI-X e1000 adapters, 2115 ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2236,15 +2246,6 @@ config R8169
2236 To compile this driver as a module, choose M here: the module 2246 To compile this driver as a module, choose M here: the module
2237 will be called r8169. This is recommended. 2247 will be called r8169. This is recommended.
2238 2248
2239config R8169_VLAN
2240 bool "VLAN support"
2241 depends on R8169 && VLAN_8021Q
2242 ---help---
2243 Say Y here for the r8169 driver to support the functions required
2244 by the kernel 802.1Q code.
2245
2246 If in doubt, say Y.
2247
2248config SB1250_MAC 2249config SB1250_MAC
2249 tristate "SB1250 Gigabit Ethernet support" 2250 tristate "SB1250 Gigabit Ethernet support"
2250 depends on SIBYTE_SB1xxx_SOC 2251 depends on SIBYTE_SB1xxx_SOC
@@ -2595,14 +2596,9 @@ config CHELSIO_T1_1G
2595 Enables support for Chelsio's gigabit Ethernet PCI cards. If you 2596 Enables support for Chelsio's gigabit Ethernet PCI cards. If you
2596 are using only 10G cards say 'N' here. 2597 are using only 10G cards say 'N' here.
2597 2598
2598config CHELSIO_T3_DEPENDS
2599 tristate
2600 depends on PCI && INET
2601 default y
2602
2603config CHELSIO_T3 2599config CHELSIO_T3
2604 tristate "Chelsio Communications T3 10Gb Ethernet support" 2600 tristate "Chelsio Communications T3 10Gb Ethernet support"
2605 depends on CHELSIO_T3_DEPENDS 2601 depends on PCI && INET
2606 select FW_LOADER 2602 select FW_LOADER
2607 select MDIO 2603 select MDIO
2608 help 2604 help
@@ -2620,14 +2616,9 @@ config CHELSIO_T3
2620 To compile this driver as a module, choose M here: the module 2616 To compile this driver as a module, choose M here: the module
2621 will be called cxgb3. 2617 will be called cxgb3.
2622 2618
2623config CHELSIO_T4_DEPENDS
2624 tristate
2625 depends on PCI && INET
2626 default y
2627
2628config CHELSIO_T4 2619config CHELSIO_T4
2629 tristate "Chelsio Communications T4 Ethernet support" 2620 tristate "Chelsio Communications T4 Ethernet support"
2630 depends on CHELSIO_T4_DEPENDS 2621 depends on PCI
2631 select FW_LOADER 2622 select FW_LOADER
2632 select MDIO 2623 select MDIO
2633 help 2624 help
@@ -2645,14 +2636,9 @@ config CHELSIO_T4
2645 To compile this driver as a module choose M here; the module 2636 To compile this driver as a module choose M here; the module
2646 will be called cxgb4. 2637 will be called cxgb4.
2647 2638
2648config CHELSIO_T4VF_DEPENDS
2649 tristate
2650 depends on PCI && INET
2651 default y
2652
2653config CHELSIO_T4VF 2639config CHELSIO_T4VF
2654 tristate "Chelsio Communications T4 Virtual Function Ethernet support" 2640 tristate "Chelsio Communications T4 Virtual Function Ethernet support"
2655 depends on CHELSIO_T4VF_DEPENDS 2641 depends on PCI
2656 help 2642 help
2657 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 2643 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
2658 adapters with PCI-E SR-IOV Virtual Functions. 2644 adapters with PCI-E SR-IOV Virtual Functions.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90738d13994..7c2171179f97 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
147obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o 147obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
148obj-$(CONFIG_AX88796) += ax88796.o 148obj-$(CONFIG_AX88796) += ax88796.o
149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o 149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
150obj-$(CONFIG_FTMAC100) += ftmac100.o
150 151
151obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o 152obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
152obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o 153obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 21f501184023..1ff001a8270c 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
547 hw->device_id = pdev->device; 547 hw->device_id = pdev->device;
548 hw->subsystem_vendor_id = pdev->subsystem_vendor; 548 hw->subsystem_vendor_id = pdev->subsystem_vendor;
549 hw->subsystem_id = pdev->subsystem_device; 549 hw->subsystem_id = pdev->subsystem_device;
550 hw->revision_id = pdev->revision;
550 551
551 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
552 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 552 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
553 553
554 phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); 554 phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 4e6f4e95a5a0..e637e9f28fd4 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
93 hw->device_id = pdev->device; 93 hw->device_id = pdev->device;
94 hw->subsystem_vendor_id = pdev->subsystem_vendor; 94 hw->subsystem_vendor_id = pdev->subsystem_vendor;
95 hw->subsystem_id = pdev->subsystem_device; 95 hw->subsystem_id = pdev->subsystem_device;
96 hw->revision_id = pdev->revision;
96 97
97 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
98 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 98 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
99 99
100 adapter->wol = 0; 100 adapter->wol = 0;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index ed709a5d07d7..4ac0d72660fe 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -220,9 +220,7 @@ struct be_rx_obj {
220 struct be_rx_stats stats; 220 struct be_rx_stats stats;
221 u8 rss_id; 221 u8 rss_id;
222 bool rx_post_starved; /* Zero rx frags have been posted to BE */ 222 bool rx_post_starved; /* Zero rx frags have been posted to BE */
223 u16 last_frag_index; 223 u32 cache_line_barrier[16];
224 u16 rsvd;
225 u32 cache_line_barrier[15];
226}; 224};
227 225
228struct be_drv_stats { 226struct be_drv_stats {
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1822ecdadc7e..cc3a235475bc 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -726,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
726 726
727 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 727 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
728 if (lancer_chip(adapter)) { 728 if (lancer_chip(adapter)) {
729 req->hdr.version = 1; 729 req->hdr.version = 2;
730 req->page_size = 1; /* 1 for 4K */ 730 req->page_size = 1; /* 1 for 4K */
731 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt, 731 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
732 coalesce_wm); 732 coalesce_wm);
@@ -862,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
862 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, 862 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
863 sizeof(*req)); 863 sizeof(*req));
864 864
865 if (lancer_chip(adapter)) {
866 req->hdr.version = 1;
867 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
868 adapter->if_handle);
869 }
870
865 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 871 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
866 req->ulp_num = BE_ULP1_NUM; 872 req->ulp_num = BE_ULP1_NUM;
867 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 873 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e5768fc705..b4ac3938b298 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -430,7 +430,7 @@ struct be_cmd_resp_mcc_create {
430/* Pseudo amap definition in which each bit of the actual structure is defined 430/* Pseudo amap definition in which each bit of the actual structure is defined
431 * as a byte: used to calculate offset/shift/mask of each field */ 431 * as a byte: used to calculate offset/shift/mask of each field */
432struct amap_tx_context { 432struct amap_tx_context {
433 u8 rsvd0[16]; /* dword 0 */ 433 u8 if_id[16]; /* dword 0 */
434 u8 tx_ring_size[4]; /* dword 0 */ 434 u8 tx_ring_size[4]; /* dword 0 */
435 u8 rsvd1[26]; /* dword 0 */ 435 u8 rsvd1[26]; /* dword 0 */
436 u8 pci_func_id[8]; /* dword 1 */ 436 u8 pci_func_id[8]; /* dword 1 */
@@ -518,7 +518,8 @@ enum be_if_flags {
518 BE_IF_FLAGS_VLAN = 0x100, 518 BE_IF_FLAGS_VLAN = 0x100,
519 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, 519 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
520 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, 520 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
521 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800 521 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
522 BE_IF_FLAGS_MULTICAST = 0x1000
522}; 523};
523 524
524/* An RX interface is an object with one or more MAC addresses and 525/* An RX interface is an object with one or more MAC addresses and
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 3f459f76cd1d..dbe67f353e8f 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -44,6 +44,18 @@
44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ 44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ 45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46 46
47
48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
49#define SLIPORT_STATUS_OFFSET 0x404
50#define SLIPORT_CONTROL_OFFSET 0x408
51
52#define SLIPORT_STATUS_ERR_MASK 0x80000000
53#define SLIPORT_STATUS_RN_MASK 0x01000000
54#define SLIPORT_STATUS_RDY_MASK 0x00800000
55
56
57#define SLI_PORT_CONTROL_IP_MASK 0x08000000
58
47/********* Memory BAR register ************/ 59/********* Memory BAR register ************/
48#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 60#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
49/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 61/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 0bdccb10aac5..68f107817326 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -865,14 +865,17 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
865 865
866static inline bool csum_passed(struct be_eth_rx_compl *rxcp) 866static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
867{ 867{
868 u8 l4_cksm, ipv6, ipcksm; 868 u8 l4_cksm, ipv6, ipcksm, tcpf, udpf;
869 869
870 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); 870 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
871 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp); 871 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
872 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp); 872 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
873 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
874 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
873 875
874 /* Ignore ipcksm for ipv6 pkts */ 876 /* L4 checksum is not reliable for non TCP/UDP packets.
875 return l4_cksm && (ipcksm || ipv6); 877 * Also ignore ipcksm for ipv6 pkts */
878 return (tcpf || udpf) && l4_cksm && (ipcksm || ipv6);
876} 879}
877 880
878static struct be_rx_page_info * 881static struct be_rx_page_info *
@@ -909,17 +912,11 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
909 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 912 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
910 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 913 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
911 914
912 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */ 915 for (i = 0; i < num_rcvd; i++) {
913 if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) { 916 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
914 917 put_page(page_info->page);
915 rxo->last_frag_index = rxq_idx; 918 memset(page_info, 0, sizeof(*page_info));
916 919 index_inc(&rxq_idx, rxq->len);
917 for (i = 0; i < num_rcvd; i++) {
918 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
919 put_page(page_info->page);
920 memset(page_info, 0, sizeof(*page_info));
921 index_inc(&rxq_idx, rxq->len);
922 }
923 } 920 }
924} 921}
925 922
@@ -1169,20 +1166,20 @@ static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1169 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; 1166 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1170} 1167}
1171 1168
1172static inline struct page *be_alloc_pages(u32 size) 1169static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1173{ 1170{
1174 gfp_t alloc_flags = GFP_ATOMIC;
1175 u32 order = get_order(size); 1171 u32 order = get_order(size);
1172
1176 if (order > 0) 1173 if (order > 0)
1177 alloc_flags |= __GFP_COMP; 1174 gfp |= __GFP_COMP;
1178 return alloc_pages(alloc_flags, order); 1175 return alloc_pages(gfp, order);
1179} 1176}
1180 1177
1181/* 1178/*
1182 * Allocate a page, split it to fragments of size rx_frag_size and post as 1179 * Allocate a page, split it to fragments of size rx_frag_size and post as
1183 * receive buffers to BE 1180 * receive buffers to BE
1184 */ 1181 */
1185static void be_post_rx_frags(struct be_rx_obj *rxo) 1182static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1186{ 1183{
1187 struct be_adapter *adapter = rxo->adapter; 1184 struct be_adapter *adapter = rxo->adapter;
1188 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; 1185 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1196,7 +1193,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
1196 page_info = &rxo->page_info_tbl[rxq->head]; 1193 page_info = &rxo->page_info_tbl[rxq->head];
1197 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { 1194 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1198 if (!pagep) { 1195 if (!pagep) {
1199 pagep = be_alloc_pages(adapter->big_page_size); 1196 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1200 if (unlikely(!pagep)) { 1197 if (unlikely(!pagep)) {
1201 rxo->stats.rx_post_fail++; 1198 rxo->stats.rx_post_fail++;
1202 break; 1199 break;
@@ -1579,9 +1576,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1579 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1576 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1580 for_all_rx_queues(adapter, rxo, i) { 1577 for_all_rx_queues(adapter, rxo, i) {
1581 rxo->adapter = adapter; 1578 rxo->adapter = adapter;
1582 /* Init last_frag_index so that the frag index in the first
1583 * completion will never match */
1584 rxo->last_frag_index = 0xffff;
1585 rxo->rx_eq.max_eqd = BE_MAX_EQD; 1579 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1586 rxo->rx_eq.enable_aic = true; 1580 rxo->rx_eq.enable_aic = true;
1587 1581
@@ -1722,7 +1716,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1722 struct be_queue_info *rx_cq = &rxo->cq; 1716 struct be_queue_info *rx_cq = &rxo->cq;
1723 struct be_eth_rx_compl *rxcp; 1717 struct be_eth_rx_compl *rxcp;
1724 u32 work_done; 1718 u32 work_done;
1725 u16 frag_index, num_rcvd; 1719 u16 num_rcvd;
1726 u8 err; 1720 u8 err;
1727 1721
1728 rxo->stats.rx_polls++; 1722 rxo->stats.rx_polls++;
@@ -1732,16 +1726,10 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1732 break; 1726 break;
1733 1727
1734 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); 1728 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1735 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1736 rxcp);
1737 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, 1729 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1738 rxcp); 1730 rxcp);
1739 1731 /* Ignore flush completions */
1740 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */ 1732 if (num_rcvd) {
1741 if (likely(frag_index != rxo->last_frag_index &&
1742 num_rcvd != 0)) {
1743 rxo->last_frag_index = frag_index;
1744
1745 if (do_gro(rxo, rxcp, err)) 1733 if (do_gro(rxo, rxcp, err))
1746 be_rx_compl_process_gro(adapter, rxo, rxcp); 1734 be_rx_compl_process_gro(adapter, rxo, rxcp);
1747 else 1735 else
@@ -1753,7 +1741,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1753 1741
1754 /* Refill the queue */ 1742 /* Refill the queue */
1755 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) 1743 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1756 be_post_rx_frags(rxo); 1744 be_post_rx_frags(rxo, GFP_ATOMIC);
1757 1745
1758 /* All consumed */ 1746 /* All consumed */
1759 if (work_done < budget) { 1747 if (work_done < budget) {
@@ -1890,7 +1878,7 @@ static void be_worker(struct work_struct *work)
1890 1878
1891 if (rxo->rx_post_starved) { 1879 if (rxo->rx_post_starved) {
1892 rxo->rx_post_starved = false; 1880 rxo->rx_post_starved = false;
1893 be_post_rx_frags(rxo); 1881 be_post_rx_frags(rxo, GFP_KERNEL);
1894 } 1882 }
1895 } 1883 }
1896 if (!adapter->ue_detected && !lancer_chip(adapter)) 1884 if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2094,13 +2082,24 @@ static int be_close(struct net_device *netdev)
2094 2082
2095 be_async_mcc_disable(adapter); 2083 be_async_mcc_disable(adapter);
2096 2084
2097 netif_stop_queue(netdev);
2098 netif_carrier_off(netdev); 2085 netif_carrier_off(netdev);
2099 adapter->link_up = false; 2086 adapter->link_up = false;
2100 2087
2101 if (!lancer_chip(adapter)) 2088 if (!lancer_chip(adapter))
2102 be_intr_set(adapter, false); 2089 be_intr_set(adapter, false);
2103 2090
2091 for_all_rx_queues(adapter, rxo, i)
2092 napi_disable(&rxo->rx_eq.napi);
2093
2094 napi_disable(&tx_eq->napi);
2095
2096 if (lancer_chip(adapter)) {
2097 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2098 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2099 for_all_rx_queues(adapter, rxo, i)
2100 be_cq_notify(adapter, rxo->cq.id, false, 0);
2101 }
2102
2104 if (adapter->msix_enabled) { 2103 if (adapter->msix_enabled) {
2105 vec = be_msix_vec_get(adapter, tx_eq); 2104 vec = be_msix_vec_get(adapter, tx_eq);
2106 synchronize_irq(vec); 2105 synchronize_irq(vec);
@@ -2114,11 +2113,6 @@ static int be_close(struct net_device *netdev)
2114 } 2113 }
2115 be_irq_unregister(adapter); 2114 be_irq_unregister(adapter);
2116 2115
2117 for_all_rx_queues(adapter, rxo, i)
2118 napi_disable(&rxo->rx_eq.napi);
2119
2120 napi_disable(&tx_eq->napi);
2121
2122 /* Wait for all pending tx completions to arrive so that 2116 /* Wait for all pending tx completions to arrive so that
2123 * all tx skbs are freed. 2117 * all tx skbs are freed.
2124 */ 2118 */
@@ -2138,7 +2132,7 @@ static int be_open(struct net_device *netdev)
2138 u16 link_speed; 2132 u16 link_speed;
2139 2133
2140 for_all_rx_queues(adapter, rxo, i) { 2134 for_all_rx_queues(adapter, rxo, i) {
2141 be_post_rx_frags(rxo); 2135 be_post_rx_frags(rxo, GFP_KERNEL);
2142 napi_enable(&rxo->rx_eq.napi); 2136 napi_enable(&rxo->rx_eq.napi);
2143 } 2137 }
2144 napi_enable(&tx_eq->napi); 2138 napi_enable(&tx_eq->napi);
@@ -2269,7 +2263,9 @@ static int be_setup(struct be_adapter *adapter)
2269 int status; 2263 int status;
2270 u8 mac[ETH_ALEN]; 2264 u8 mac[ETH_ALEN];
2271 2265
2272 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; 2266 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2267 BE_IF_FLAGS_BROADCAST |
2268 BE_IF_FLAGS_MULTICAST;
2273 2269
2274 if (be_physfn(adapter)) { 2270 if (be_physfn(adapter)) {
2275 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS | 2271 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2913,6 +2909,54 @@ static int be_dev_family_check(struct be_adapter *adapter)
2913 return 0; 2909 return 0;
2914} 2910}
2915 2911
2912static int lancer_wait_ready(struct be_adapter *adapter)
2913{
2914#define SLIPORT_READY_TIMEOUT 500
2915 u32 sliport_status;
2916 int status = 0, i;
2917
2918 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2919 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2920 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2921 break;
2922
2923 msleep(20);
2924 }
2925
2926 if (i == SLIPORT_READY_TIMEOUT)
2927 status = -1;
2928
2929 return status;
2930}
2931
2932static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2933{
2934 int status;
2935 u32 sliport_status, err, reset_needed;
2936 status = lancer_wait_ready(adapter);
2937 if (!status) {
2938 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2939 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2940 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2941 if (err && reset_needed) {
2942 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2943 adapter->db + SLIPORT_CONTROL_OFFSET);
2944
2945 /* check adapter has corrected the error */
2946 status = lancer_wait_ready(adapter);
2947 sliport_status = ioread32(adapter->db +
2948 SLIPORT_STATUS_OFFSET);
2949 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2950 SLIPORT_STATUS_RN_MASK);
2951 if (status || sliport_status)
2952 status = -1;
2953 } else if (err || reset_needed) {
2954 status = -1;
2955 }
2956 }
2957 return status;
2958}
2959
2916static int __devinit be_probe(struct pci_dev *pdev, 2960static int __devinit be_probe(struct pci_dev *pdev,
2917 const struct pci_device_id *pdev_id) 2961 const struct pci_device_id *pdev_id)
2918{ 2962{
@@ -2962,6 +3006,14 @@ static int __devinit be_probe(struct pci_dev *pdev,
2962 if (status) 3006 if (status)
2963 goto free_netdev; 3007 goto free_netdev;
2964 3008
3009 if (lancer_chip(adapter)) {
3010 status = lancer_test_and_set_rdy_state(adapter);
3011 if (status) {
3012 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3013 goto free_netdev;
3014 }
3015 }
3016
2965 /* sync up with fw's ready state */ 3017 /* sync up with fw's ready state */
2966 if (be_physfn(adapter)) { 3018 if (be_physfn(adapter)) {
2967 status = be_cmd_POST(adapter); 3019 status = be_cmd_POST(adapter);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c0dd30d870ae..50d1e0793091 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -31,7 +31,7 @@
31#define BNX2X_NEW_NAPI 31#define BNX2X_NEW_NAPI
32 32
33#if defined(CONFIG_DCB) 33#if defined(CONFIG_DCB)
34#define BCM_DCB 34#define BCM_DCBNL
35#endif 35#endif
36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
37#define BCM_CNIC 1 37#define BCM_CNIC 1
@@ -1631,19 +1631,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1631#define BNX2X_BTR 4 1631#define BNX2X_BTR 4
1632#define MAX_SPQ_PENDING 8 1632#define MAX_SPQ_PENDING 8
1633 1633
1634 1634/* CMNG constants, as derived from system spec calculations */
1635/* CMNG constants 1635/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
1636 derived from lab experiments, and not from system spec calculations !!! */ 1636#define DEF_MIN_RATE 100
1637#define DEF_MIN_RATE 100
1638/* resolution of the rate shaping timer - 100 usec */ 1637/* resolution of the rate shaping timer - 100 usec */
1639#define RS_PERIODIC_TIMEOUT_USEC 100 1638#define RS_PERIODIC_TIMEOUT_USEC 100
1640/* resolution of fairness algorithm in usecs -
1641 coefficient for calculating the actual t fair */
1642#define T_FAIR_COEF 10000000
1643/* number of bytes in single QM arbitration cycle - 1639/* number of bytes in single QM arbitration cycle -
1644 coefficient for calculating the fairness timer */ 1640 * coefficient for calculating the fairness timer */
1645#define QM_ARB_BYTES 40000 1641#define QM_ARB_BYTES 160000
1646#define FAIR_MEM 2 1642/* resolution of Min algorithm 1:100 */
1643#define MIN_RES 100
1644/* how many bytes above threshold for the minimal credit of Min algorithm*/
1645#define MIN_ABOVE_THRESH 32768
1646/* Fairness algorithm integration time coefficient -
1647 * for calculating the actual Tfair */
1648#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
1649/* Memory of fairness algorithm . 2 cycles */
1650#define FAIR_MEM 2
1647 1651
1648 1652
1649#define ATTN_NIG_FOR_FUNC (1L << 8) 1653#define ATTN_NIG_FOR_FUNC (1L << 8)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 6fac8e183c59..b01b622f4e13 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
259#endif 259#endif
260} 260}
261 261
262/* Timestamp option length allowed for TPA aggregation:
263 *
264 * nop nop kind length echo val
265 */
266#define TPA_TSTAMP_OPT_LEN 12
267/**
268 * Calculate the approximate value of the MSS for this
269 * aggregation using the first packet of it.
270 *
271 * @param bp
272 * @param parsing_flags Parsing flags from the START CQE
273 * @param len_on_bd Total length of the first packet for the
274 * aggregation.
275 */
276static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
277 u16 len_on_bd)
278{
279 /* TPA arrgregation won't have an IP options and TCP options
280 * other than timestamp.
281 */
282 u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
283
284
285 /* Check if there was a TCP timestamp, if there is it's will
286 * always be 12 bytes length: nop nop kind length echo val.
287 *
288 * Otherwise FW would close the aggregation.
289 */
290 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
291 hdrs_len += TPA_TSTAMP_OPT_LEN;
292
293 return len_on_bd - hdrs_len;
294}
295
262static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 296static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
263 struct sk_buff *skb, 297 struct sk_buff *skb,
264 struct eth_fast_path_rx_cqe *fp_cqe, 298 struct eth_fast_path_rx_cqe *fp_cqe,
265 u16 cqe_idx) 299 u16 cqe_idx, u16 parsing_flags)
266{ 300{
267 struct sw_rx_page *rx_pg, old_rx_pg; 301 struct sw_rx_page *rx_pg, old_rx_pg;
268 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); 302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
275 309
276 /* This is needed in order to enable forwarding support */ 310 /* This is needed in order to enable forwarding support */
277 if (frag_size) 311 if (frag_size)
278 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, 312 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
279 max(frag_size, (u32)len_on_bd)); 313 len_on_bd);
280 314
281#ifdef BNX2X_STOP_ON_ERROR 315#ifdef BNX2X_STOP_ON_ERROR
282 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { 316 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
344 if (likely(new_skb)) { 378 if (likely(new_skb)) {
345 /* fix ip xsum and give it to the stack */ 379 /* fix ip xsum and give it to the stack */
346 /* (no need to map the new skb) */ 380 /* (no need to map the new skb) */
381 u16 parsing_flags =
382 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
347 383
348 prefetch(skb); 384 prefetch(skb);
349 prefetch(((char *)(skb)) + L1_CACHE_BYTES); 385 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
373 } 409 }
374 410
375 if (!bnx2x_fill_frag_skb(bp, fp, skb, 411 if (!bnx2x_fill_frag_skb(bp, fp, skb,
376 &cqe->fast_path_cqe, cqe_idx)) { 412 &cqe->fast_path_cqe, cqe_idx,
377 if ((le16_to_cpu(cqe->fast_path_cqe. 413 parsing_flags)) {
378 pars_flags.flags) & PARSING_FLAGS_VLAN)) 414 if (parsing_flags & PARSING_FLAGS_VLAN)
379 __vlan_hwaccel_put_tag(skb, 415 __vlan_hwaccel_put_tag(skb,
380 le16_to_cpu(cqe->fast_path_cqe. 416 le16_to_cpu(cqe->fast_path_cqe.
381 vlan_tag)); 417 vlan_tag));
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{ 739{
704 u16 line_speed = bp->link_vars.line_speed; 740 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) { 741 if (IS_MF(bp)) {
706 u16 maxCfg = (bp->mf_config[BP_VN(bp)] & 742 u16 maxCfg = bnx2x_extract_max_cfg(bp,
707 FUNC_MF_CFG_MAX_BW_MASK) >> 743 bp->mf_config[BP_VN(bp)]);
708 FUNC_MF_CFG_MAX_BW_SHIFT; 744
709 /* Calculate the current MAX line speed limit for the DCC 745 /* Calculate the current MAX line speed limit for the MF
710 * capable devices 746 * devices
711 */ 747 */
712 if (IS_MF_SD(bp)) { 748 if (IS_MF_SI(bp))
749 line_speed = (line_speed * maxCfg) / 100;
750 else { /* SD mode */
713 u16 vn_max_rate = maxCfg * 100; 751 u16 vn_max_rate = maxCfg * 100;
714 752
715 if (vn_max_rate < line_speed) 753 if (vn_max_rate < line_speed)
716 line_speed = vn_max_rate; 754 line_speed = vn_max_rate;
717 } else /* IS_MF_SI(bp)) */ 755 }
718 line_speed = (line_speed * maxCfg) / 100;
719 } 756 }
720 757
721 return line_speed; 758 return line_speed;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index f062d5d20fa9..8c401c990bf2 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1044,4 +1044,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
1044void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1044void bnx2x_acquire_phy_lock(struct bnx2x *bp);
1045void bnx2x_release_phy_lock(struct bnx2x *bp); 1045void bnx2x_release_phy_lock(struct bnx2x *bp);
1046 1046
1047/**
1048 * Extracts MAX BW part from MF configuration.
1049 *
1050 * @param bp
1051 * @param mf_cfg
1052 *
1053 * @return u16
1054 */
1055static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
1056{
1057 u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1058 FUNC_MF_CFG_MAX_BW_SHIFT;
1059 if (!max_cfg) {
1060 BNX2X_ERR("Illegal configuration detected for Max BW - "
1061 "using 100 instead\n");
1062 max_cfg = 100;
1063 }
1064 return max_cfg;
1065}
1066
1047#endif /* BNX2X_CMN_H */ 1067#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index fb60021f81fb..9a24d79c71d9 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -19,6 +19,9 @@
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#ifdef BCM_DCBNL
23#include <linux/dcbnl.h>
24#endif
22 25
23#include "bnx2x.h" 26#include "bnx2x.h"
24#include "bnx2x_cmn.h" 27#include "bnx2x_cmn.h"
@@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
508 return 0; 511 return 0;
509} 512}
510 513
514
515#ifdef BCM_DCBNL
516static inline
517u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
518{
519 u8 pri;
520
521 /* Choose the highest priority */
522 for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
523 if (ent->pri_bitmap & (1 << pri))
524 break;
525 return pri;
526}
527
528static inline
529u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
530{
531 return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
532 DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
533 DCB_APP_IDTYPE_ETHTYPE;
534}
535
536static inline
537void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
538{
539 int i;
540 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
541 bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
542 ~DCBX_APP_ENTRY_VALID;
543}
544
545int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
546{
547 int i, err = 0;
548
549 for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
550 struct dcbx_app_priority_entry *ent =
551 &bp->dcbx_local_feat.app.app_pri_tbl[i];
552
553 if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
554 u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
555
556 /* avoid invalid user-priority */
557 if (up) {
558 struct dcb_app app;
559 app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
560 app.protocol = ent->app_id;
561 app.priority = delall ? 0 : up;
562 err = dcb_setapp(bp->dev, &app);
563 }
564 }
565 }
566 return err;
567}
568#endif
569
511void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) 570void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
512{ 571{
513 switch (state) { 572 switch (state) {
514 case BNX2X_DCBX_STATE_NEG_RECEIVED: 573 case BNX2X_DCBX_STATE_NEG_RECEIVED:
515 { 574 {
516 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); 575 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
517 576#ifdef BCM_DCBNL
577 /**
578 * Delete app tlvs from dcbnl before reading new
579 * negotiation results
580 */
581 bnx2x_dcbnl_update_applist(bp, true);
582#endif
518 /* Read neg results if dcbx is in the FW */ 583 /* Read neg results if dcbx is in the FW */
519 if (bnx2x_dcbx_read_shmem_neg_results(bp)) 584 if (bnx2x_dcbx_read_shmem_neg_results(bp))
520 return; 585 return;
@@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
526 bp->dcbx_error); 591 bp->dcbx_error);
527 592
528 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { 593 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
594#ifdef BCM_DCBNL
595 /**
596 * Add new app tlvs to dcbnl
597 */
598 bnx2x_dcbnl_update_applist(bp, false);
599#endif
529 bnx2x_dcbx_stop_hw_tx(bp); 600 bnx2x_dcbx_stop_hw_tx(bp);
530 return; 601 return;
531 } 602 }
532 /* fall through */ 603 /* fall through */
604#ifdef BCM_DCBNL
605 /**
606 * Invalidate the local app tlvs if they are not added
607 * to the dcbnl app list to avoid deleting them from
608 * the list later on
609 */
610 bnx2x_dcbx_invalidate_local_apps(bp);
611#endif
533 } 612 }
534 case BNX2X_DCBX_STATE_TX_PAUSED: 613 case BNX2X_DCBX_STATE_TX_PAUSED:
535 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); 614 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
1505 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); 1584 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
1506} 1585}
1507/* DCB netlink */ 1586/* DCB netlink */
1508#ifdef BCM_DCB 1587#ifdef BCM_DCBNL
1509#include <linux/dcbnl.h>
1510 1588
1511#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ 1589#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
1512 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) 1590 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
1816 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); 1894 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
1817} 1895}
1818 1896
1819static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
1820 u8 idtype, u16 idval)
1821{
1822 if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
1823 return false;
1824
1825 switch (idtype) {
1826 case DCB_APP_IDTYPE_ETHTYPE:
1827 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1828 DCBX_APP_SF_ETH_TYPE)
1829 return false;
1830 break;
1831 case DCB_APP_IDTYPE_PORTNUM:
1832 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1833 DCBX_APP_SF_PORT)
1834 return false;
1835 break;
1836 default:
1837 return false;
1838 }
1839 if (app_ent->app_id != idval)
1840 return false;
1841
1842 return true;
1843}
1844
1845static void bnx2x_admin_app_set_ent( 1897static void bnx2x_admin_app_set_ent(
1846 struct bnx2x_admin_priority_app_table *app_ent, 1898 struct bnx2x_admin_priority_app_table *app_ent,
1847 u8 idtype, u16 idval, u8 up) 1899 u8 idtype, u16 idval, u8 up)
@@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
1943 return bnx2x_set_admin_app_up(bp, idtype, idval, up); 1995 return bnx2x_set_admin_app_up(bp, idtype, idval, up);
1944} 1996}
1945 1997
1946static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
1947 u16 idval)
1948{
1949 int i;
1950 u8 up = 0;
1951
1952 struct bnx2x *bp = netdev_priv(netdev);
1953 DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
1954
1955 /* iterate over the app entries looking for idtype and idval */
1956 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
1957 if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
1958 idtype, idval))
1959 break;
1960
1961 if (i < DCBX_MAX_APP_PROTOCOL)
1962 /* if found return up */
1963 up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
1964 else
1965 DP(NETIF_MSG_LINK, "app not found\n");
1966
1967 return up;
1968}
1969
1970static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) 1998static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
1971{ 1999{
1972 struct bnx2x *bp = netdev_priv(netdev); 2000 struct bnx2x *bp = netdev_priv(netdev);
@@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2107 .setnumtcs = bnx2x_dcbnl_set_numtcs, 2135 .setnumtcs = bnx2x_dcbnl_set_numtcs,
2108 .getpfcstate = bnx2x_dcbnl_get_pfc_state, 2136 .getpfcstate = bnx2x_dcbnl_get_pfc_state,
2109 .setpfcstate = bnx2x_dcbnl_set_pfc_state, 2137 .setpfcstate = bnx2x_dcbnl_set_pfc_state,
2110 .getapp = bnx2x_dcbnl_get_app_up,
2111 .setapp = bnx2x_dcbnl_set_app_up, 2138 .setapp = bnx2x_dcbnl_set_app_up,
2112 .getdcbx = bnx2x_dcbnl_get_dcbx, 2139 .getdcbx = bnx2x_dcbnl_get_dcbx,
2113 .setdcbx = bnx2x_dcbnl_set_dcbx, 2140 .setdcbx = bnx2x_dcbnl_set_dcbx,
@@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2115 .setfeatcfg = bnx2x_dcbnl_set_featcfg, 2142 .setfeatcfg = bnx2x_dcbnl_set_featcfg,
2116}; 2143};
2117 2144
2118#endif /* BCM_DCB */ 2145#endif /* BCM_DCBNL */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index f650f98e4092..71b8eda43bd0 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -189,8 +189,9 @@ enum {
189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); 189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
190 190
191/* DCB netlink */ 191/* DCB netlink */
192#ifdef BCM_DCB 192#ifdef BCM_DCBNL
193extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; 193extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
194#endif /* BCM_DCB */ 194int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
195#endif /* BCM_DCBNL */
195 196
196#endif /* BNX2X_DCB_H */ 197#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 8d19d127f796..85291d8b3329 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
238 speed |= (cmd->speed_hi << 16); 238 speed |= (cmd->speed_hi << 16);
239 239
240 if (IS_MF_SI(bp)) { 240 if (IS_MF_SI(bp)) {
241 u32 param = 0; 241 u32 param = 0, part;
242 u32 line_speed = bp->link_vars.line_speed; 242 u32 line_speed = bp->link_vars.line_speed;
243 243
244 /* use 10G if no link detected */ 244 /* use 10G if no link detected */
@@ -251,9 +251,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
251 REQ_BC_VER_4_SET_MF_BW); 251 REQ_BC_VER_4_SET_MF_BW);
252 return -EINVAL; 252 return -EINVAL;
253 } 253 }
254 if (line_speed < speed) { 254 part = (speed * 100) / line_speed;
255 BNX2X_DEV_INFO("New speed should be less or equal " 255 if (line_speed < speed || !part) {
256 "to actual line speed\n"); 256 BNX2X_DEV_INFO("Speed setting should be in a range "
257 "from 1%% to 100%% "
258 "of actual line speed\n");
257 return -EINVAL; 259 return -EINVAL;
258 } 260 }
259 /* load old values */ 261 /* load old values */
@@ -263,8 +265,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
263 param &= FUNC_MF_CFG_MIN_BW_MASK; 265 param &= FUNC_MF_CFG_MIN_BW_MASK;
264 266
265 /* set new MAX value */ 267 /* set new MAX value */
266 param |= (((speed * 100) / line_speed) 268 param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT)
267 << FUNC_MF_CFG_MAX_BW_SHIFT)
268 & FUNC_MF_CFG_MAX_BW_MASK; 269 & FUNC_MF_CFG_MAX_BW_MASK;
269 270
270 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); 271 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
@@ -1781,9 +1782,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
1781 { 0x100, 0x350 }, /* manuf_info */ 1782 { 0x100, 0x350 }, /* manuf_info */
1782 { 0x450, 0xf0 }, /* feature_info */ 1783 { 0x450, 0xf0 }, /* feature_info */
1783 { 0x640, 0x64 }, /* upgrade_key_info */ 1784 { 0x640, 0x64 }, /* upgrade_key_info */
1784 { 0x6a4, 0x64 },
1785 { 0x708, 0x70 }, /* manuf_key_info */ 1785 { 0x708, 0x70 }, /* manuf_key_info */
1786 { 0x778, 0x70 },
1787 { 0, 0 } 1786 { 0, 0 }
1788 }; 1787 };
1789 __be32 buf[0x350 / 4]; 1788 __be32 buf[0x350 / 4];
@@ -1933,11 +1932,11 @@ static void bnx2x_self_test(struct net_device *dev,
1933 buf[4] = 1; 1932 buf[4] = 1;
1934 etest->flags |= ETH_TEST_FL_FAILED; 1933 etest->flags |= ETH_TEST_FL_FAILED;
1935 } 1934 }
1936 if (bp->port.pmf) 1935
1937 if (bnx2x_link_test(bp, is_serdes) != 0) { 1936 if (bnx2x_link_test(bp, is_serdes) != 0) {
1938 buf[5] = 1; 1937 buf[5] = 1;
1939 etest->flags |= ETH_TEST_FL_FAILED; 1938 etest->flags |= ETH_TEST_FL_FAILED;
1940 } 1939 }
1941 1940
1942#ifdef BNX2X_EXTRA_DEBUG 1941#ifdef BNX2X_EXTRA_DEBUG
1943 bnx2x_panic_dump(bp); 1942 bnx2x_panic_dump(bp);
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5a268e9a0895..fa6dbe3f2058 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -241,7 +241,7 @@ static const struct {
241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
242 * want to handle "system kill" flow at the moment. 242 * want to handle "system kill" flow at the moment.
243 */ 243 */
244 BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), 244 BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), 247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 6c7745eee00d..30b21d2f26f6 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1974 vn_max_rate = 0; 1974 vn_max_rate = 0;
1975 1975
1976 } else { 1976 } else {
1977 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
1977 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1979 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1978 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1980 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1979 /* If min rate is zero - set it to 1 */ 1981 /* If fairness is enabled (not all min rates are zeroes) and
1982 if current min rate is zero - set it to 1.
1983 This is a requirement of the algorithm. */
1980 if (bp->vn_weight_sum && (vn_min_rate == 0)) 1984 if (bp->vn_weight_sum && (vn_min_rate == 0))
1981 vn_min_rate = DEF_MIN_RATE; 1985 vn_min_rate = DEF_MIN_RATE;
1982 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1986
1983 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1987 if (IS_MF_SI(bp))
1988 /* maxCfg in percents of linkspeed */
1989 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990 else
1991 /* maxCfg is absolute in 100Mb units */
1992 vn_max_rate = maxCfg * 100;
1984 } 1993 }
1985 1994
1986 DP(NETIF_MSG_IFUP, 1995 DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2006 m_fair_vn.vn_credit_delta = 2015 m_fair_vn.vn_credit_delta =
2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2016 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2008 (8 * bp->vn_weight_sum))), 2017 (8 * bp->vn_weight_sum))),
2009 (bp->cmng.fair_vars.fair_threshold * 2)); 2018 (bp->cmng.fair_vars.fair_threshold +
2019 MIN_ABOVE_THRESH));
2010 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2020 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2011 m_fair_vn.vn_credit_delta); 2021 m_fair_vn.vn_credit_delta);
2012 } 2022 }
@@ -9441,7 +9451,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9441 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 9451 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9442 dev->vlan_features |= NETIF_F_TSO6; 9452 dev->vlan_features |= NETIF_F_TSO6;
9443 9453
9444#ifdef BCM_DCB 9454#ifdef BCM_DCBNL
9445 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 9455 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9446#endif 9456#endif
9447 9457
@@ -9848,6 +9858,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9848 } 9858 }
9849#endif 9859#endif
9850 9860
9861#ifdef BCM_DCBNL
9862 /* Delete app tlvs from dcbnl */
9863 bnx2x_dcbnl_update_applist(bp, true);
9864#endif
9865
9851 unregister_netdev(dev); 9866 unregister_netdev(dev);
9852 9867
9853 /* Delete all NAPI objects */ 9868 /* Delete all NAPI objects */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index bda60d590fa8..3445ded6674f 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1239 if (unlikely(bp->panic)) 1239 if (unlikely(bp->panic))
1240 return; 1240 return;
1241 1241
1242 bnx2x_stats_stm[bp->stats_state][event].action(bp);
1243
1242 /* Protect a state change flow */ 1244 /* Protect a state change flow */
1243 spin_lock_bh(&bp->stats_lock); 1245 spin_lock_bh(&bp->stats_lock);
1244 state = bp->stats_state; 1246 state = bp->stats_state;
1245 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1247 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1246 spin_unlock_bh(&bp->stats_lock); 1248 spin_unlock_bh(&bp->stats_lock);
1247 1249
1248 bnx2x_stats_stm[state][event].action(bp);
1249
1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1252 state, event, bp->stats_state); 1252 state, event, bp->stats_state);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5c6fba802f2b..9bc5de3e04a8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -604,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
604 604
605 _lock_rx_hashtbl(bond); 605 _lock_rx_hashtbl(bond);
606 606
607 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src)); 607 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
608 client_info = &(bond_info->rx_hashtbl[hash_index]); 608 client_info = &(bond_info->rx_hashtbl[hash_index]);
609 609
610 if (client_info->assigned) { 610 if (client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 584f97b73060..7b7ca971672f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1380,14 +1380,6 @@ static inline void slave_disable_netpoll(struct slave *slave)
1380static void bond_netpoll_cleanup(struct net_device *bond_dev) 1380static void bond_netpoll_cleanup(struct net_device *bond_dev)
1381{ 1381{
1382} 1382}
1383static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1384{
1385 return 0;
1386}
1387static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
1388{
1389 return NULL;
1390}
1391#endif 1383#endif
1392 1384
1393/*---------------------------------- IOCTL ----------------------------------*/ 1385/*---------------------------------- IOCTL ----------------------------------*/
@@ -1519,9 +1511,13 @@ static struct sk_buff *bond_handle_frame(struct sk_buff *skb)
1519 if (bond_dev->priv_flags & IFF_MASTER_ALB && 1511 if (bond_dev->priv_flags & IFF_MASTER_ALB &&
1520 bond_dev->priv_flags & IFF_BRIDGE_PORT && 1512 bond_dev->priv_flags & IFF_BRIDGE_PORT &&
1521 skb->pkt_type == PACKET_HOST) { 1513 skb->pkt_type == PACKET_HOST) {
1522 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
1523 1514
1524 memcpy(dest, bond_dev->dev_addr, ETH_ALEN); 1515 if (unlikely(skb_cow_head(skb,
1516 skb->data - skb_mac_header(skb)))) {
1517 kfree_skb(skb);
1518 return NULL;
1519 }
1520 memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
1525 } 1521 }
1526 1522
1527 return skb; 1523 return skb;
@@ -2681,7 +2677,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2681 2677
2682static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2678static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2683{ 2679{
2684 int i, vlan_id, rv; 2680 int i, vlan_id;
2685 __be32 *targets = bond->params.arp_targets; 2681 __be32 *targets = bond->params.arp_targets;
2686 struct vlan_entry *vlan; 2682 struct vlan_entry *vlan;
2687 struct net_device *vlan_dev; 2683 struct net_device *vlan_dev;
@@ -2708,8 +2704,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2708 fl.fl4_dst = targets[i]; 2704 fl.fl4_dst = targets[i];
2709 fl.fl4_tos = RTO_ONLINK; 2705 fl.fl4_tos = RTO_ONLINK;
2710 2706
2711 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl); 2707 rt = ip_route_output_key(dev_net(bond->dev), &fl);
2712 if (rv) { 2708 if (IS_ERR(rt)) {
2713 if (net_ratelimit()) { 2709 if (net_ratelimit()) {
2714 pr_warning("%s: no route to arp_ip_target %pI4\n", 2710 pr_warning("%s: no route to arp_ip_target %pI4\n",
2715 bond->dev->name, &fl.fl4_dst); 2711 bond->dev->name, &fl.fl4_dst);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a401b8df84f0..ff4e26980220 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -269,7 +269,8 @@ struct bonding {
269 * 269 *
270 * Caller must hold bond lock for read 270 * Caller must hold bond lock for read
271 */ 271 */
272static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) 272static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
273 struct net_device *slave_dev)
273{ 274{
274 struct slave *slave = NULL; 275 struct slave *slave = NULL;
275 int i; 276 int i;
@@ -280,7 +281,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
280 } 281 }
281 } 282 }
282 283
283 return 0; 284 return NULL;
284} 285}
285 286
286static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 287static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5157e15e96eb..aeea9f9ff6e8 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
633}; 633};
634 634
635static const struct can_bittiming_const softing_btr_const = { 635static const struct can_bittiming_const softing_btr_const = {
636 .name = "softing",
636 .tseg1_min = 1, 637 .tseg1_min = 1,
637 .tseg1_max = 16, 638 .tseg1_max = 16,
638 .tseg2_min = 1, 639 .tseg2_min = 1,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 2d2d28f58e91..271a1f00c224 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2767,6 +2767,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2767 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2767 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2768 int kcqe_cnt; 2768 int kcqe_cnt;
2769 2769
2770 /* status block index must be read before reading other fields */
2771 rmb();
2770 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2772 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2771 2773
2772 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2774 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2777,6 +2779,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2777 barrier(); 2779 barrier();
2778 if (status_idx != *cp->kcq1.status_idx_ptr) { 2780 if (status_idx != *cp->kcq1.status_idx_ptr) {
2779 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2781 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2782 /* status block index must be read first */
2783 rmb();
2780 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2784 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2781 } else 2785 } else
2782 break; 2786 break;
@@ -2895,6 +2899,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2895 u32 last_status = *info->status_idx_ptr; 2899 u32 last_status = *info->status_idx_ptr;
2896 int kcqe_cnt; 2900 int kcqe_cnt;
2897 2901
2902 /* status block index must be read before reading the KCQ */
2903 rmb();
2898 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2904 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2899 2905
2900 service_kcqes(dev, kcqe_cnt); 2906 service_kcqes(dev, kcqe_cnt);
@@ -2905,6 +2911,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2905 break; 2911 break;
2906 2912
2907 last_status = *info->status_idx_ptr; 2913 last_status = *info->status_idx_ptr;
2914 /* status block index must be read before reading the KCQ */
2915 rmb();
2908 } 2916 }
2909 return last_status; 2917 return last_status;
2910} 2918}
@@ -2913,26 +2921,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2913{ 2921{
2914 struct cnic_dev *dev = (struct cnic_dev *) data; 2922 struct cnic_dev *dev = (struct cnic_dev *) data;
2915 struct cnic_local *cp = dev->cnic_priv; 2923 struct cnic_local *cp = dev->cnic_priv;
2916 u32 status_idx; 2924 u32 status_idx, new_status_idx;
2917 2925
2918 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2926 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2919 return; 2927 return;
2920 2928
2921 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2929 while (1) {
2930 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2922 2931
2923 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2932 CNIC_WR16(dev, cp->kcq1.io_addr,
2933 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2924 2934
2925 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 2935 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
2926 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2936 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2937 status_idx, IGU_INT_ENABLE, 1);
2938 break;
2939 }
2940
2941 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2942
2943 if (new_status_idx != status_idx)
2944 continue;
2927 2945
2928 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2946 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2929 MAX_KCQ_IDX); 2947 MAX_KCQ_IDX);
2930 2948
2931 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2949 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2932 status_idx, IGU_INT_ENABLE, 1); 2950 status_idx, IGU_INT_ENABLE, 1);
2933 } else { 2951
2934 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2952 break;
2935 status_idx, IGU_INT_ENABLE, 1);
2936 } 2953 }
2937} 2954}
2938 2955
@@ -3397,9 +3414,12 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3397 memset(&fl, 0, sizeof(fl)); 3414 memset(&fl, 0, sizeof(fl));
3398 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; 3415 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
3399 3416
3400 err = ip_route_output_key(&init_net, &rt, &fl); 3417 rt = ip_route_output_key(&init_net, &fl);
3401 if (!err) 3418 err = 0;
3419 if (!IS_ERR(rt))
3402 *dst = &rt->dst; 3420 *dst = &rt->dst;
3421 else
3422 err = PTR_ERR(rt);
3403 return err; 3423 return err;
3404#else 3424#else
3405 return -ENETUNREACH; 3425 return -ENETUNREACH;
@@ -5158,15 +5178,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5158 5178
5159 dev_hold(dev); 5179 dev_hold(dev);
5160 pci_dev_get(pdev); 5180 pci_dev_get(pdev);
5161 if (pdev->device == PCI_DEVICE_ID_NX2_5709 || 5181 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5162 pdev->device == PCI_DEVICE_ID_NX2_5709S) { 5182 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5163 u8 rev; 5183 (pdev->revision < 0x10)) {
5164 5184 pci_dev_put(pdev);
5165 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 5185 goto cnic_err;
5166 if (rev < 0x10) {
5167 pci_dev_put(pdev);
5168 goto cnic_err;
5169 }
5170 } 5186 }
5171 pci_dev_put(pdev); 5187 pci_dev_put(pdev);
5172 5188
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d17d178..7018bfe408a4 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
1008 int ret; 1008 int ret;
1009 1009
1010 /* free and bail if we are shutting down */ 1010 /* free and bail if we are shutting down */
1011 if (unlikely(!netif_running(ndev))) { 1011 if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
1012 dev_kfree_skb_any(skb); 1012 dev_kfree_skb_any(skb);
1013 return; 1013 return;
1014 } 1014 }
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc1d900..317708113601 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
802 /* Checksum mode */ 802 /* Checksum mode */
803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum); 803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
804 804
805 /* GPIO0 on pre-activate PHY */
806 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
807 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 805 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
808 iow(db, DM9000_GPR, 0); /* Enable PHY */
809 806
810 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 807 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
811 808
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
852 unsigned long flags; 849 unsigned long flags;
853 850
854 /* Save previous register address */ 851 /* Save previous register address */
855 reg_save = readb(db->io_addr);
856 spin_lock_irqsave(&db->lock, flags); 852 spin_lock_irqsave(&db->lock, flags);
853 reg_save = readb(db->io_addr);
857 854
858 netif_stop_queue(dev); 855 netif_stop_queue(dev);
859 dm9000_reset(db); 856 dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
1194 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1191 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1195 return -EAGAIN; 1192 return -EAGAIN;
1196 1193
1194 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1195 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1196 mdelay(1); /* delay needs by DM9000B */
1197
1197 /* Initialize DM9000 board */ 1198 /* Initialize DM9000 board */
1198 dm9000_reset(db); 1199 dm9000_reset(db);
1199 dm9000_init_dm9000(dev); 1200 dm9000_init_dm9000(dev);
@@ -1592,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
1592 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 1593 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1593 } 1594 }
1594 1595
1595 if (!is_valid_ether_addr(ndev->dev_addr)) 1596 if (!is_valid_ether_addr(ndev->dev_addr)) {
1596 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " 1597 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1597 "set using ifconfig\n", ndev->name); 1598 "set using ifconfig\n", ndev->name);
1598 1599
1600 random_ether_addr(ndev->dev_addr);
1601 mac_src = "random";
1602 }
1603
1604
1599 platform_set_drvdata(pdev, ndev); 1605 platform_set_drvdata(pdev, ndev);
1600 ret = register_netdev(ndev); 1606 ret = register_netdev(ndev);
1601 1607
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b72fa9..8318ea06cb6d 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
337 for (i = 0; i < PHY_MAX_ADDR; i++) 337 for (i = 0; i < PHY_MAX_ADDR; i++)
338 bp->mii_bus->irq[i] = PHY_POLL; 338 bp->mii_bus->irq[i] = PHY_POLL;
339 339
340 platform_set_drvdata(bp->dev, bp->mii_bus);
341
342 if (mdiobus_register(bp->mii_bus)) { 340 if (mdiobus_register(bp->mii_bus)) {
343 err = -ENXIO; 341 err = -ENXIO;
344 goto err_out_free_mdio_irq; 342 goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
863 bp = netdev_priv(dev); 861 bp = netdev_priv(dev);
864 bp->dev = dev; 862 bp->dev = dev;
865 863
864 platform_set_drvdata(pdev, dev);
866 SET_NETDEV_DEV(dev, &pdev->dev); 865 SET_NETDEV_DEV(dev, &pdev->dev);
867 866
868 spin_lock_init(&bp->lock); 867 spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 55c1711f1688..33e7c45a4fe4 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -42,7 +42,8 @@
42#define GBE_CONFIG_RAM_BASE \ 42#define GBE_CONFIG_RAM_BASE \
43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
44 44
45#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) 45#define GBE_CONFIG_BASE_VIRT \
46 ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
46 47
47#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 48#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
48 (iowrite16_rep(base + offset, data, count)) 49 (iowrite16_rep(base + offset, data, count))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 65ef9b5548d8..d4e51aa231b9 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
433 struct e1000_hw *hw = &adapter->hw; 433 struct e1000_hw *hw = &adapter->hw;
434 u32 *regs_buff = p; 434 u32 *regs_buff = p;
435 u16 phy_data; 435 u16 phy_data;
436 u8 revision_id;
437 436
438 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 437 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
439 438
440 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 439 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
441 440 adapter->pdev->device;
442 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
443 441
444 regs_buff[0] = er32(CTRL); 442 regs_buff[0] = er32(CTRL);
445 regs_buff[1] = er32(STATUS); 443 regs_buff[1] = er32(STATUS);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ec0b803c501e..455d5a1101ed 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -6012,7 +6012,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6012 /* APME bit in EEPROM is mapped to WUC.APME */ 6012 /* APME bit in EEPROM is mapped to WUC.APME */
6013 eeprom_data = er32(WUC); 6013 eeprom_data = er32(WUC);
6014 eeprom_apme_mask = E1000_WUC_APME; 6014 eeprom_apme_mask = E1000_WUC_APME;
6015 if (eeprom_data & E1000_WUC_PHY_WAKE) 6015 if ((hw->mac.type > e1000_ich10lan) &&
6016 (eeprom_data & E1000_WUC_PHY_WAKE))
6016 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 6017 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6017 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 6018 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6018 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 6019 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index aee5256e522b..e816bbb9fbf9 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "2.1.1.9" 35#define DRV_VERSION "2.1.1.10"
36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 4f1710e31eb4..8b9cad5e9712 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1126,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1126 if (err) 1126 if (err)
1127 return err; 1127 return err;
1128 1128
1129 enic_reset_addr_lists(enic);
1130
1129 switch (enic->pp.request) { 1131 switch (enic->pp.request) {
1130 1132
1131 case PORT_REQUEST_ASSOCIATE: 1133 case PORT_REQUEST_ASSOCIATE:
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 0cb1cf9cf4b0..a59cf961a436 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,8 @@
111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
112 */ 112 */
113 113
114#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115
114#include <linux/capability.h> 116#include <linux/capability.h>
115#include <linux/module.h> 117#include <linux/module.h>
116#include <linux/kernel.h> 118#include <linux/kernel.h>
@@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
162} 164}
163 165
164static const char version[] __initconst = 166static const char version[] __initconst =
165 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; 167 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
166 168
167static const struct net_device_ops eql_netdev_ops = { 169static const struct net_device_ops eql_netdev_ops = {
168 .ndo_open = eql_open, 170 .ndo_open = eql_open,
@@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
204 equalizer_t *eql = netdev_priv(dev); 206 equalizer_t *eql = netdev_priv(dev);
205 207
206 /* XXX We should force this off automatically for the user. */ 208 /* XXX We should force this off automatically for the user. */
207 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 209 netdev_info(dev,
208 "your slave devices.\n", dev->name); 210 "remember to turn off Van-Jacobson compression on your slave devices\n");
209 211
210 BUG_ON(!list_empty(&eql->queue.all_slaves)); 212 BUG_ON(!list_empty(&eql->queue.all_slaves));
211 213
@@ -591,7 +593,7 @@ static int __init eql_init_module(void)
591{ 593{
592 int err; 594 int err;
593 595
594 printk(version); 596 pr_info("%s\n", version);
595 597
596 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup); 598 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
597 if (!dev_eql) 599 if (!dev_eql)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 634c0daeecec..885d8baff7d5 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
74 }, { 74 }, {
75 .name = "imx28-fec", 75 .name = "imx28-fec",
76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
77 } 77 },
78 { }
78}; 79};
79 80
80static unsigned char macaddr[ETH_ALEN]; 81static unsigned char macaddr[ETH_ALEN];
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
new file mode 100644
index 000000000000..df70368bf317
--- /dev/null
+++ b/drivers/net/ftmac100.c
@@ -0,0 +1,1196 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/dma-mapping.h>
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/init.h>
28#include <linux/io.h>
29#include <linux/mii.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33
34#include "ftmac100.h"
35
36#define DRV_NAME "ftmac100"
37#define DRV_VERSION "0.2"
38
39#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
40#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
41
42#define MAX_PKT_SIZE 1518
43#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
44
45#if MAX_PKT_SIZE > 0x7ff
46#error invalid MAX_PKT_SIZE
47#endif
48
49#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
50#error invalid RX_BUF_SIZE
51#endif
52
53/******************************************************************************
54 * private data
55 *****************************************************************************/
56struct ftmac100_descs {
57 struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
58 struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
59};
60
61struct ftmac100 {
62 struct resource *res;
63 void __iomem *base;
64 int irq;
65
66 struct ftmac100_descs *descs;
67 dma_addr_t descs_dma_addr;
68
69 unsigned int rx_pointer;
70 unsigned int tx_clean_pointer;
71 unsigned int tx_pointer;
72 unsigned int tx_pending;
73
74 spinlock_t tx_lock;
75
76 struct net_device *netdev;
77 struct device *dev;
78 struct napi_struct napi;
79
80 struct mii_if_info mii;
81};
82
83static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes);
84
85/******************************************************************************
86 * internal functions (hardware register access)
87 *****************************************************************************/
88#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \
89 FTMAC100_INT_NORXBUF | \
90 FTMAC100_INT_XPKT_OK | \
91 FTMAC100_INT_XPKT_LOST | \
92 FTMAC100_INT_RPKT_LOST | \
93 FTMAC100_INT_AHB_ERR | \
94 FTMAC100_INT_PHYSTS_CHG)
95
96#define INT_MASK_ALL_DISABLED 0
97
98static void ftmac100_enable_all_int(struct ftmac100 *priv)
99{
100 iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
101}
102
103static void ftmac100_disable_all_int(struct ftmac100 *priv)
104{
105 iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
106}
107
108static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
109{
110 iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
111}
112
113static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
114{
115 iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
116}
117
118static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
119{
120 iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
121}
122
123static int ftmac100_reset(struct ftmac100 *priv)
124{
125 struct net_device *netdev = priv->netdev;
126 int i;
127
128 /* NOTE: reset clears all registers */
129 iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);
130
131 for (i = 0; i < 5; i++) {
132 unsigned int maccr;
133
134 maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
135 if (!(maccr & FTMAC100_MACCR_SW_RST)) {
136 /*
137 * FTMAC100_MACCR_SW_RST cleared does not indicate
138 * that hardware reset completed (what the f*ck).
139 * We still need to wait for a while.
140 */
141 usleep_range(500, 1000);
142 return 0;
143 }
144
145 usleep_range(1000, 10000);
146 }
147
148 netdev_err(netdev, "software reset failed\n");
149 return -EIO;
150}
151
152static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
153{
154 unsigned int maddr = mac[0] << 8 | mac[1];
155 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
156
157 iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
158 iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
159}
160
161#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
162 FTMAC100_MACCR_RCV_EN | \
163 FTMAC100_MACCR_XDMA_EN | \
164 FTMAC100_MACCR_RDMA_EN | \
165 FTMAC100_MACCR_CRC_APD | \
166 FTMAC100_MACCR_FULLDUP | \
167 FTMAC100_MACCR_RX_RUNT | \
168 FTMAC100_MACCR_RX_BROADPKT)
169
170static int ftmac100_start_hw(struct ftmac100 *priv)
171{
172 struct net_device *netdev = priv->netdev;
173
174 if (ftmac100_reset(priv))
175 return -EIO;
176
177 /* setup ring buffer base registers */
178 ftmac100_set_rx_ring_base(priv,
179 priv->descs_dma_addr +
180 offsetof(struct ftmac100_descs, rxdes));
181 ftmac100_set_tx_ring_base(priv,
182 priv->descs_dma_addr +
183 offsetof(struct ftmac100_descs, txdes));
184
185 iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);
186
187 ftmac100_set_mac(priv, netdev->dev_addr);
188
189 iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
190 return 0;
191}
192
193static void ftmac100_stop_hw(struct ftmac100 *priv)
194{
195 iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
196}
197
198/******************************************************************************
199 * internal functions (receive descriptor)
200 *****************************************************************************/
201static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
202{
203 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
204}
205
206static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
207{
208 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
209}
210
211static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
212{
213 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
214}
215
216static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
217{
218 /* clear status bits */
219 rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
220}
221
222static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
223{
224 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
225}
226
227static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
228{
229 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
230}
231
232static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
233{
234 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
235}
236
237static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
238{
239 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
240}
241
242static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
243{
244 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
245}
246
247static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
248{
249 return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
250}
251
252static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
253{
254 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
255}
256
257static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
258 unsigned int size)
259{
260 rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
261 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
262}
263
264static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
265{
266 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
267}
268
269static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
270 dma_addr_t addr)
271{
272 rxdes->rxdes2 = cpu_to_le32(addr);
273}
274
275static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
276{
277 return le32_to_cpu(rxdes->rxdes2);
278}
279
280/*
281 * rxdes3 is not used by hardware. We use it to keep track of page.
282 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
283 */
284static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
285{
286 rxdes->rxdes3 = (unsigned int)page;
287}
288
289static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
290{
291 return (struct page *)rxdes->rxdes3;
292}
293
294/******************************************************************************
295 * internal functions (receive)
296 *****************************************************************************/
297static int ftmac100_next_rx_pointer(int pointer)
298{
299 return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
300}
301
302static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
303{
304 priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
305}
306
307static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
308{
309 return &priv->descs->rxdes[priv->rx_pointer];
310}
311
312static struct ftmac100_rxdes *
313ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
314{
315 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
316
317 while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
318 if (ftmac100_rxdes_first_segment(rxdes))
319 return rxdes;
320
321 ftmac100_rxdes_set_dma_own(rxdes);
322 ftmac100_rx_pointer_advance(priv);
323 rxdes = ftmac100_current_rxdes(priv);
324 }
325
326 return NULL;
327}
328
329static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
330 struct ftmac100_rxdes *rxdes)
331{
332 struct net_device *netdev = priv->netdev;
333 bool error = false;
334
335 if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
336 if (net_ratelimit())
337 netdev_info(netdev, "rx err\n");
338
339 netdev->stats.rx_errors++;
340 error = true;
341 }
342
343 if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
344 if (net_ratelimit())
345 netdev_info(netdev, "rx crc err\n");
346
347 netdev->stats.rx_crc_errors++;
348 error = true;
349 }
350
351 if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
352 if (net_ratelimit())
353 netdev_info(netdev, "rx frame too long\n");
354
355 netdev->stats.rx_length_errors++;
356 error = true;
357 } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
358 if (net_ratelimit())
359 netdev_info(netdev, "rx runt\n");
360
361 netdev->stats.rx_length_errors++;
362 error = true;
363 } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
364 if (net_ratelimit())
365 netdev_info(netdev, "rx odd nibble\n");
366
367 netdev->stats.rx_length_errors++;
368 error = true;
369 }
370
371 return error;
372}
373
374static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
375{
376 struct net_device *netdev = priv->netdev;
377 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
378 bool done = false;
379
380 if (net_ratelimit())
381 netdev_dbg(netdev, "drop packet %p\n", rxdes);
382
383 do {
384 if (ftmac100_rxdes_last_segment(rxdes))
385 done = true;
386
387 ftmac100_rxdes_set_dma_own(rxdes);
388 ftmac100_rx_pointer_advance(priv);
389 rxdes = ftmac100_current_rxdes(priv);
390 } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));
391
392 netdev->stats.rx_dropped++;
393}
394
395static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
396{
397 struct net_device *netdev = priv->netdev;
398 struct ftmac100_rxdes *rxdes;
399 struct sk_buff *skb;
400 struct page *page;
401 dma_addr_t map;
402 int length;
403
404 rxdes = ftmac100_rx_locate_first_segment(priv);
405 if (!rxdes)
406 return false;
407
408 if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
409 ftmac100_rx_drop_packet(priv);
410 return true;
411 }
412
413 /*
414 * It is impossible to get multi-segment packets
415 * because we always provide big enough receive buffers.
416 */
417 if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
418 BUG();
419
420 /* start processing */
421 skb = netdev_alloc_skb_ip_align(netdev, 128);
422 if (unlikely(!skb)) {
423 if (net_ratelimit())
424 netdev_err(netdev, "rx skb alloc failed\n");
425
426 ftmac100_rx_drop_packet(priv);
427 return true;
428 }
429
430 if (unlikely(ftmac100_rxdes_multicast(rxdes)))
431 netdev->stats.multicast++;
432
433 map = ftmac100_rxdes_get_dma_addr(rxdes);
434 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
435
436 length = ftmac100_rxdes_frame_length(rxdes);
437 page = ftmac100_rxdes_get_page(rxdes);
438 skb_fill_page_desc(skb, 0, page, 0, length);
439 skb->len += length;
440 skb->data_len += length;
441 skb->truesize += length;
442 __pskb_pull_tail(skb, min(length, 64));
443
444 ftmac100_alloc_rx_page(priv, rxdes);
445
446 ftmac100_rx_pointer_advance(priv);
447
448 skb->protocol = eth_type_trans(skb, netdev);
449
450 netdev->stats.rx_packets++;
451 netdev->stats.rx_bytes += skb->len;
452
453 /* push packet to protocol stack */
454 netif_receive_skb(skb);
455
456 (*processed)++;
457 return true;
458}
459
460/******************************************************************************
461 * internal functions (transmit descriptor)
462 *****************************************************************************/
463static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
464{
465 /* clear all except end of ring bit */
466 txdes->txdes0 = 0;
467 txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
468 txdes->txdes2 = 0;
469 txdes->txdes3 = 0;
470}
471
472static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
473{
474 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
475}
476
477static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
478{
479 /*
480 * Make sure dma own bit will not be set before any other
481 * descriptor fields.
482 */
483 wmb();
484 txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
485}
486
487static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
488{
489 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
490}
491
492static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
493{
494 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
495}
496
497static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
498{
499 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
500}
501
502static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
503{
504 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
505}
506
507static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
508{
509 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
510}
511
512static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
513{
514 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
515}
516
517static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
518 unsigned int len)
519{
520 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
521}
522
523static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
524 dma_addr_t addr)
525{
526 txdes->txdes2 = cpu_to_le32(addr);
527}
528
529static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
530{
531 return le32_to_cpu(txdes->txdes2);
532}
533
534/*
535 * txdes3 is not used by hardware. We use it to keep track of socket buffer.
536 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
537 */
538static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
539{
540 txdes->txdes3 = (unsigned int)skb;
541}
542
543static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
544{
545 return (struct sk_buff *)txdes->txdes3;
546}
547
548/******************************************************************************
549 * internal functions (transmit)
550 *****************************************************************************/
551static int ftmac100_next_tx_pointer(int pointer)
552{
553 return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
554}
555
556static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
557{
558 priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
559}
560
561static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
562{
563 priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
564}
565
566static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
567{
568 return &priv->descs->txdes[priv->tx_pointer];
569}
570
571static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
572{
573 return &priv->descs->txdes[priv->tx_clean_pointer];
574}
575
576static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
577{
578 struct net_device *netdev = priv->netdev;
579 struct ftmac100_txdes *txdes;
580 struct sk_buff *skb;
581 dma_addr_t map;
582
583 if (priv->tx_pending == 0)
584 return false;
585
586 txdes = ftmac100_current_clean_txdes(priv);
587
588 if (ftmac100_txdes_owned_by_dma(txdes))
589 return false;
590
591 skb = ftmac100_txdes_get_skb(txdes);
592 map = ftmac100_txdes_get_dma_addr(txdes);
593
594 if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
595 ftmac100_txdes_late_collision(txdes))) {
596 /*
597 * packet transmitted to ethernet lost due to late collision
598 * or excessive collision
599 */
600 netdev->stats.tx_aborted_errors++;
601 } else {
602 netdev->stats.tx_packets++;
603 netdev->stats.tx_bytes += skb->len;
604 }
605
606 dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
607 dev_kfree_skb(skb);
608
609 ftmac100_txdes_reset(txdes);
610
611 ftmac100_tx_clean_pointer_advance(priv);
612
613 spin_lock(&priv->tx_lock);
614 priv->tx_pending--;
615 spin_unlock(&priv->tx_lock);
616 netif_wake_queue(netdev);
617
618 return true;
619}
620
621static void ftmac100_tx_complete(struct ftmac100 *priv)
622{
623 while (ftmac100_tx_complete_packet(priv))
624 ;
625}
626
627static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
628 dma_addr_t map)
629{
630 struct net_device *netdev = priv->netdev;
631 struct ftmac100_txdes *txdes;
632 unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
633
634 txdes = ftmac100_current_txdes(priv);
635 ftmac100_tx_pointer_advance(priv);
636
637 /* setup TX descriptor */
638 ftmac100_txdes_set_skb(txdes, skb);
639 ftmac100_txdes_set_dma_addr(txdes, map);
640
641 ftmac100_txdes_set_first_segment(txdes);
642 ftmac100_txdes_set_last_segment(txdes);
643 ftmac100_txdes_set_txint(txdes);
644 ftmac100_txdes_set_buffer_size(txdes, len);
645
646 spin_lock(&priv->tx_lock);
647 priv->tx_pending++;
648 if (priv->tx_pending == TX_QUEUE_ENTRIES)
649 netif_stop_queue(netdev);
650
651 /* start transmit */
652 ftmac100_txdes_set_dma_own(txdes);
653 spin_unlock(&priv->tx_lock);
654
655 ftmac100_txdma_start_polling(priv);
656 return NETDEV_TX_OK;
657}
658
659/******************************************************************************
660 * internal functions (buffer)
661 *****************************************************************************/
662static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes)
663{
664 struct net_device *netdev = priv->netdev;
665 struct page *page;
666 dma_addr_t map;
667
668 page = alloc_page(GFP_KERNEL);
669 if (!page) {
670 if (net_ratelimit())
671 netdev_err(netdev, "failed to allocate rx page\n");
672 return -ENOMEM;
673 }
674
675 map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
676 if (unlikely(dma_mapping_error(priv->dev, map))) {
677 if (net_ratelimit())
678 netdev_err(netdev, "failed to map rx page\n");
679 __free_page(page);
680 return -ENOMEM;
681 }
682
683 ftmac100_rxdes_set_page(rxdes, page);
684 ftmac100_rxdes_set_dma_addr(rxdes, map);
685 ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
686 ftmac100_rxdes_set_dma_own(rxdes);
687 return 0;
688}
689
/*
 * Release everything attached to both descriptor rings.
 *
 * Unmaps and frees each RX page and each in-flight TX skb still
 * referenced by a descriptor, then frees the coherent descriptor block
 * itself.  Safe to call on a partially populated RX ring (the
 * ftmac100_alloc_buffers() error path): slots without a page/skb are
 * skipped.
 */
static void ftmac100_free_buffers(struct ftmac100 *priv)
{
	int i;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
		struct page *page = ftmac100_rxdes_get_page(rxdes);
		dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);

		/* descriptor never got a page (partial allocation) */
		if (!page)
			continue;

		dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
		__free_page(page);
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
		struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
		dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);

		/* slot holds no pending packet */
		if (!skb)
			continue;

		dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
		dev_kfree_skb(skb);
	}

	dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
			  priv->descs, priv->descs_dma_addr);
}
721
/*
 * Allocate the coherent descriptor block and populate the RX ring with
 * mapped pages.  Returns 0 on success or -ENOMEM; on failure everything
 * allocated so far is released via ftmac100_free_buffers().
 */
static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
	int i;

	priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
					 &priv->descs_dma_addr, GFP_KERNEL);
	if (!priv->descs)
		return -ENOMEM;

	/* zero all descriptors: cleared skb/page fields mark empty slots */
	memset(priv->descs, 0, sizeof(struct ftmac100_descs));

	/* initialize RX ring */
	ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];

		if (ftmac100_alloc_rx_page(priv, rxdes))
			goto err;
	}

	/* initialize TX ring; slots are filled lazily at transmit time */
	ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
	return 0;

err:
	ftmac100_free_buffers(priv);
	return -ENOMEM;
}
751
752/******************************************************************************
753 * struct mii_if_info functions
754 *****************************************************************************/
755static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
756{
757 struct ftmac100 *priv = netdev_priv(netdev);
758 unsigned int phycr;
759 int i;
760
761 phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
762 FTMAC100_PHYCR_REGAD(reg) |
763 FTMAC100_PHYCR_MIIRD;
764
765 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
766
767 for (i = 0; i < 10; i++) {
768 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
769
770 if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
771 return phycr & FTMAC100_PHYCR_MIIRDATA;
772
773 usleep_range(100, 1000);
774 }
775
776 netdev_err(netdev, "mdio read timed out\n");
777 return 0;
778}
779
780static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
781 int data)
782{
783 struct ftmac100 *priv = netdev_priv(netdev);
784 unsigned int phycr;
785 int i;
786
787 phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
788 FTMAC100_PHYCR_REGAD(reg) |
789 FTMAC100_PHYCR_MIIWR;
790
791 data = FTMAC100_PHYWDATA_MIIWDATA(data);
792
793 iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
794 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
795
796 for (i = 0; i < 10; i++) {
797 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
798
799 if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
800 return;
801
802 usleep_range(100, 1000);
803 }
804
805 netdev_err(netdev, "mdio write timed out\n");
806}
807
808/******************************************************************************
809 * struct ethtool_ops functions
810 *****************************************************************************/
811static void ftmac100_get_drvinfo(struct net_device *netdev,
812 struct ethtool_drvinfo *info)
813{
814 strcpy(info->driver, DRV_NAME);
815 strcpy(info->version, DRV_VERSION);
816 strcpy(info->bus_info, dev_name(&netdev->dev));
817}
818
819static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
820{
821 struct ftmac100 *priv = netdev_priv(netdev);
822 return mii_ethtool_gset(&priv->mii, cmd);
823}
824
825static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
826{
827 struct ftmac100 *priv = netdev_priv(netdev);
828 return mii_ethtool_sset(&priv->mii, cmd);
829}
830
831static int ftmac100_nway_reset(struct net_device *netdev)
832{
833 struct ftmac100 *priv = netdev_priv(netdev);
834 return mii_nway_restart(&priv->mii);
835}
836
837static u32 ftmac100_get_link(struct net_device *netdev)
838{
839 struct ftmac100 *priv = netdev_priv(netdev);
840 return mii_link_ok(&priv->mii);
841}
842
843static const struct ethtool_ops ftmac100_ethtool_ops = {
844 .set_settings = ftmac100_set_settings,
845 .get_settings = ftmac100_get_settings,
846 .get_drvinfo = ftmac100_get_drvinfo,
847 .nway_reset = ftmac100_nway_reset,
848 .get_link = ftmac100_get_link,
849};
850
851/******************************************************************************
852 * interrupt handler
853 *****************************************************************************/
854static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
855{
856 struct net_device *netdev = dev_id;
857 struct ftmac100 *priv = netdev_priv(netdev);
858
859 if (likely(netif_running(netdev))) {
860 /* Disable interrupts for polling */
861 ftmac100_disable_all_int(priv);
862 napi_schedule(&priv->napi);
863 }
864
865 return IRQ_HANDLED;
866}
867
868/******************************************************************************
869 * struct napi_struct functions
870 *****************************************************************************/
/*
 * NAPI poll handler: service RX, TX-completion and error events that
 * the interrupt handler deferred to us.
 *
 * Returns the number of packets received this pass.  When everything
 * was drained within @budget, polling stops and device interrupts are
 * re-enabled.
 */
static int ftmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
	struct net_device *netdev = priv->netdev;
	unsigned int status;
	bool completed = true;
	int rx = 0;

	/*
	 * NOTE(review): assumes reading ISR latches/clears the pending
	 * bits — confirm against the FTMAC100 datasheet.
	 */
	status = ioread32(priv->base + FTMAC100_OFFSET_ISR);

	if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
		/*
		 * FTMAC100_INT_RPKT_FINISH:
		 * RX DMA has received packets into RX buffer successfully
		 *
		 * FTMAC100_INT_NORXBUF:
		 * RX buffer unavailable
		 */
		bool retry;

		do {
			retry = ftmac100_rx_packet(priv, &rx);
		} while (retry && rx < budget);

		/* ring not empty but budget exhausted: keep polling */
		if (retry && rx == budget)
			completed = false;
	}

	if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
		/*
		 * FTMAC100_INT_XPKT_OK:
		 * packet transmitted to ethernet successfully
		 *
		 * FTMAC100_INT_XPKT_LOST:
		 * packet transmitted to ethernet lost due to late
		 * collision or excessive collision
		 */
		ftmac100_tx_complete(priv);
	}

	/* error conditions: log (rate-limited) and bump the counters */
	if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
		      FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
		if (net_ratelimit())
			netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
				    status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
				    status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
				    status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
				    status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");

		if (status & FTMAC100_INT_NORXBUF) {
			/* RX buffer unavailable */
			netdev->stats.rx_over_errors++;
		}

		if (status & FTMAC100_INT_RPKT_LOST) {
			/* received packet lost due to RX FIFO full */
			netdev->stats.rx_fifo_errors++;
		}

		if (status & FTMAC100_INT_PHYSTS_CHG) {
			/* PHY link status change */
			mii_check_link(&priv->mii);
		}
	}

	if (completed) {
		/* stop polling and let the hardware interrupt us again */
		napi_complete(napi);
		ftmac100_enable_all_int(priv);
	}

	return rx;
}
944
945/******************************************************************************
946 * struct net_device_ops functions
947 *****************************************************************************/
/*
 * ndo_open: allocate the rings, grab the interrupt line, start the MAC
 * and enable NAPI + the TX queue.  Each error path unwinds exactly what
 * was set up before it, in reverse order.
 */
static int ftmac100_open(struct net_device *netdev)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	int err;

	err = ftmac100_alloc_buffers(priv);
	if (err) {
		netdev_err(netdev, "failed to allocate buffers\n");
		goto err_alloc;
	}

	err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", priv->irq);
		goto err_irq;
	}

	/* reset ring indices before the hardware starts */
	priv->rx_pointer = 0;
	priv->tx_clean_pointer = 0;
	priv->tx_pointer = 0;
	priv->tx_pending = 0;

	err = ftmac100_start_hw(priv);
	if (err)
		goto err_hw;

	napi_enable(&priv->napi);
	netif_start_queue(netdev);

	/* interrupts are unmasked last, once everything is ready */
	ftmac100_enable_all_int(priv);

	return 0;

err_hw:
	free_irq(priv->irq, netdev);
err_irq:
	ftmac100_free_buffers(priv);
err_alloc:
	return err;
}
988
/*
 * ndo_stop: tear down in the reverse order of ftmac100_open().
 * Interrupts are masked first so NAPI cannot be rescheduled, then the
 * queue and NAPI are stopped before the hardware, IRQ and buffers are
 * released.
 */
static int ftmac100_stop(struct net_device *netdev)
{
	struct ftmac100 *priv = netdev_priv(netdev);

	ftmac100_disable_all_int(priv);
	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	ftmac100_stop_hw(priv);
	free_irq(priv->irq, netdev);
	ftmac100_free_buffers(priv);

	return 0;
}
1002
1003static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1004{
1005 struct ftmac100 *priv = netdev_priv(netdev);
1006 dma_addr_t map;
1007
1008 if (unlikely(skb->len > MAX_PKT_SIZE)) {
1009 if (net_ratelimit())
1010 netdev_dbg(netdev, "tx packet too big\n");
1011
1012 netdev->stats.tx_dropped++;
1013 dev_kfree_skb(skb);
1014 return NETDEV_TX_OK;
1015 }
1016
1017 map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1018 if (unlikely(dma_mapping_error(priv->dev, map))) {
1019 /* drop packet */
1020 if (net_ratelimit())
1021 netdev_err(netdev, "map socket buffer failed\n");
1022
1023 netdev->stats.tx_dropped++;
1024 dev_kfree_skb(skb);
1025 return NETDEV_TX_OK;
1026 }
1027
1028 return ftmac100_xmit(priv, skb, map);
1029}
1030
1031/* optional */
1032static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1033{
1034 struct ftmac100 *priv = netdev_priv(netdev);
1035 struct mii_ioctl_data *data = if_mii(ifr);
1036
1037 return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
1038}
1039
/* net_device callbacks; MAC address handling uses the generic eth helpers */
static const struct net_device_ops ftmac100_netdev_ops = {
	.ndo_open		= ftmac100_open,
	.ndo_stop		= ftmac100_stop,
	.ndo_start_xmit		= ftmac100_hard_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ftmac100_do_ioctl,
};
1048
1049/******************************************************************************
1050 * struct platform_driver functions
1051 *****************************************************************************/
1052static int ftmac100_probe(struct platform_device *pdev)
1053{
1054 struct resource *res;
1055 int irq;
1056 struct net_device *netdev;
1057 struct ftmac100 *priv;
1058 int err;
1059
1060 if (!pdev)
1061 return -ENODEV;
1062
1063 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1064 if (!res)
1065 return -ENXIO;
1066
1067 irq = platform_get_irq(pdev, 0);
1068 if (irq < 0)
1069 return irq;
1070
1071 /* setup net_device */
1072 netdev = alloc_etherdev(sizeof(*priv));
1073 if (!netdev) {
1074 err = -ENOMEM;
1075 goto err_alloc_etherdev;
1076 }
1077
1078 SET_NETDEV_DEV(netdev, &pdev->dev);
1079 SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
1080 netdev->netdev_ops = &ftmac100_netdev_ops;
1081
1082 platform_set_drvdata(pdev, netdev);
1083
1084 /* setup private data */
1085 priv = netdev_priv(netdev);
1086 priv->netdev = netdev;
1087 priv->dev = &pdev->dev;
1088
1089 spin_lock_init(&priv->tx_lock);
1090
1091 /* initialize NAPI */
1092 netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
1093
1094 /* map io memory */
1095 priv->res = request_mem_region(res->start, resource_size(res),
1096 dev_name(&pdev->dev));
1097 if (!priv->res) {
1098 dev_err(&pdev->dev, "Could not reserve memory region\n");
1099 err = -ENOMEM;
1100 goto err_req_mem;
1101 }
1102
1103 priv->base = ioremap(res->start, res->end - res->start);
1104 if (!priv->base) {
1105 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1106 err = -EIO;
1107 goto err_ioremap;
1108 }
1109
1110 priv->irq = irq;
1111
1112 /* initialize struct mii_if_info */
1113 priv->mii.phy_id = 0;
1114 priv->mii.phy_id_mask = 0x1f;
1115 priv->mii.reg_num_mask = 0x1f;
1116 priv->mii.dev = netdev;
1117 priv->mii.mdio_read = ftmac100_mdio_read;
1118 priv->mii.mdio_write = ftmac100_mdio_write;
1119
1120 /* register network device */
1121 err = register_netdev(netdev);
1122 if (err) {
1123 dev_err(&pdev->dev, "Failed to register netdev\n");
1124 goto err_register_netdev;
1125 }
1126
1127 netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
1128
1129 if (!is_valid_ether_addr(netdev->dev_addr)) {
1130 random_ether_addr(netdev->dev_addr);
1131 netdev_info(netdev, "generated random MAC address %pM\n",
1132 netdev->dev_addr);
1133 }
1134
1135 return 0;
1136
1137err_register_netdev:
1138 iounmap(priv->base);
1139err_ioremap:
1140 release_resource(priv->res);
1141err_req_mem:
1142 netif_napi_del(&priv->napi);
1143 platform_set_drvdata(pdev, NULL);
1144 free_netdev(netdev);
1145err_alloc_etherdev:
1146 return err;
1147}
1148
/*
 * Remove: mirror of probe.  unregister_netdev() implicitly closes the
 * device (ndo_stop), so only the register mapping, the memory region,
 * NAPI and the net_device itself remain to be released here.
 */
static int __exit ftmac100_remove(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct ftmac100 *priv;

	netdev = platform_get_drvdata(pdev);
	priv = netdev_priv(netdev);

	unregister_netdev(netdev);

	iounmap(priv->base);
	release_resource(priv->res);

	netif_napi_del(&priv->napi);
	platform_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return 0;
}
1167
/* Platform driver glue; __exit_p() drops .remove when built into the kernel. */
static struct platform_driver ftmac100_driver = {
	.probe		= ftmac100_probe,
	.remove		= __exit_p(ftmac100_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
1176
1177/******************************************************************************
1178 * initialization / finalization
1179 *****************************************************************************/
1180static int __init ftmac100_init(void)
1181{
1182 pr_info("Loading version " DRV_VERSION " ...\n");
1183 return platform_driver_register(&ftmac100_driver);
1184}
1185
/* Module unload: unregister the platform driver. */
static void __exit ftmac100_exit(void)
{
	platform_driver_unregister(&ftmac100_driver);
}
1190
1191module_init(ftmac100_init);
1192module_exit(ftmac100_exit);
1193
1194MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
1195MODULE_DESCRIPTION("FTMAC100 driver");
1196MODULE_LICENSE("GPL");
diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h
new file mode 100644
index 000000000000..46a0c47b1ee1
--- /dev/null
+++ b/drivers/net/ftmac100.h
@@ -0,0 +1,180 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef __FTMAC100_H
23#define __FTMAC100_H
24
25#define FTMAC100_OFFSET_ISR 0x00
26#define FTMAC100_OFFSET_IMR 0x04
27#define FTMAC100_OFFSET_MAC_MADR 0x08
28#define FTMAC100_OFFSET_MAC_LADR 0x0c
29#define FTMAC100_OFFSET_MAHT0 0x10
30#define FTMAC100_OFFSET_MAHT1 0x14
31#define FTMAC100_OFFSET_TXPD 0x18
32#define FTMAC100_OFFSET_RXPD 0x1c
33#define FTMAC100_OFFSET_TXR_BADR 0x20
34#define FTMAC100_OFFSET_RXR_BADR 0x24
35#define FTMAC100_OFFSET_ITC 0x28
36#define FTMAC100_OFFSET_APTC 0x2c
37#define FTMAC100_OFFSET_DBLAC 0x30
38#define FTMAC100_OFFSET_MACCR 0x88
39#define FTMAC100_OFFSET_MACSR 0x8c
40#define FTMAC100_OFFSET_PHYCR 0x90
41#define FTMAC100_OFFSET_PHYWDATA 0x94
42#define FTMAC100_OFFSET_FCR 0x98
43#define FTMAC100_OFFSET_BPR 0x9c
44#define FTMAC100_OFFSET_TS 0xc4
45#define FTMAC100_OFFSET_DMAFIFOS 0xc8
46#define FTMAC100_OFFSET_TM 0xcc
47#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4
48#define FTMAC100_OFFSET_RPF_AEP 0xd8
49#define FTMAC100_OFFSET_XM_PG 0xdc
50#define FTMAC100_OFFSET_RUNT_TLCC 0xe0
51#define FTMAC100_OFFSET_CRCER_FTL 0xe4
52#define FTMAC100_OFFSET_RLC_RCC 0xe8
53#define FTMAC100_OFFSET_BROC 0xec
54#define FTMAC100_OFFSET_MULCA 0xf0
55#define FTMAC100_OFFSET_RP 0xf4
56#define FTMAC100_OFFSET_XP 0xf8
57
58/*
59 * Interrupt status register & interrupt mask register
60 */
61#define FTMAC100_INT_RPKT_FINISH (1 << 0)
62#define FTMAC100_INT_NORXBUF (1 << 1)
63#define FTMAC100_INT_XPKT_FINISH (1 << 2)
64#define FTMAC100_INT_NOTXBUF (1 << 3)
65#define FTMAC100_INT_XPKT_OK (1 << 4)
66#define FTMAC100_INT_XPKT_LOST (1 << 5)
67#define FTMAC100_INT_RPKT_SAV (1 << 6)
68#define FTMAC100_INT_RPKT_LOST (1 << 7)
69#define FTMAC100_INT_AHB_ERR (1 << 8)
70#define FTMAC100_INT_PHYSTS_CHG (1 << 9)
71
72/*
73 * Interrupt timer control register
74 */
75#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
76#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
77#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7)
78#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
79#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
80#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15)
81
82/*
83 * Automatic polling timer control register
84 */
85#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
86#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
87#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
88#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
89
90/*
91 * DMA burst length and arbitration control register
92 */
93#define FTMAC100_DBLAC_INCR4_EN (1 << 0)
94#define FTMAC100_DBLAC_INCR8_EN (1 << 1)
95#define FTMAC100_DBLAC_INCR16_EN (1 << 2)
96#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3)
97#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6)
98#define FTMAC100_DBLAC_RX_THR_EN (1 << 9)
99
100/*
101 * MAC control register
102 */
103#define FTMAC100_MACCR_XDMA_EN (1 << 0)
104#define FTMAC100_MACCR_RDMA_EN (1 << 1)
105#define FTMAC100_MACCR_SW_RST (1 << 2)
106#define FTMAC100_MACCR_LOOP_EN (1 << 3)
107#define FTMAC100_MACCR_CRC_DIS (1 << 4)
108#define FTMAC100_MACCR_XMT_EN (1 << 5)
109#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6)
110#define FTMAC100_MACCR_RCV_EN (1 << 8)
111#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9)
112#define FTMAC100_MACCR_RX_RUNT (1 << 10)
113#define FTMAC100_MACCR_RX_FTL (1 << 11)
114#define FTMAC100_MACCR_RCV_ALL (1 << 12)
115#define FTMAC100_MACCR_CRC_APD (1 << 14)
116#define FTMAC100_MACCR_FULLDUP (1 << 15)
117#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16)
118#define FTMAC100_MACCR_RX_BROADPKT (1 << 17)
119
120/*
121 * PHY control register
122 */
123#define FTMAC100_PHYCR_MIIRDATA 0xffff
124#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
125#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
126#define FTMAC100_PHYCR_MIIRD (1 << 26)
127#define FTMAC100_PHYCR_MIIWR (1 << 27)
128
129/*
130 * PHY write data register
131 */
132#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff)
133
134/*
135 * Transmit descriptor, aligned to 16 bytes
136 */
/*
 * Transmit descriptor, aligned to 16 bytes.
 *
 * Field meanings follow the FTMAC100_TXDES0_* / FTMAC100_TXDES1_* bit
 * definitions below.
 */
struct ftmac100_txdes {
	unsigned int txdes0;	/* status: collision flags, TXDMA_OWN */
	unsigned int txdes1;	/* control: buffer size, FTS/LTS, TXIC, EDOTR */
	unsigned int txdes2;	/* TXBUF_BADR */
	unsigned int txdes3;	/* not used by HW — presumably driver-private storage; confirm in ftmac100.c */
} __attribute__ ((aligned(16)));
143
144#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0)
145#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1)
146#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31)
147
148#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff)
149#define FTMAC100_TXDES1_LTS (1 << 27)
150#define FTMAC100_TXDES1_FTS (1 << 28)
151#define FTMAC100_TXDES1_TX2FIC (1 << 29)
152#define FTMAC100_TXDES1_TXIC (1 << 30)
153#define FTMAC100_TXDES1_EDOTR (1 << 31)
154
155/*
156 * Receive descriptor, aligned to 16 bytes
157 */
/*
 * Receive descriptor, aligned to 16 bytes.
 *
 * Field meanings follow the FTMAC100_RXDES0_* / FTMAC100_RXDES1_* bit
 * definitions below.
 */
struct ftmac100_rxdes {
	unsigned int rxdes0;	/* status: frame length, error flags, RXDMA_OWN */
	unsigned int rxdes1;	/* control: buffer size, EDORR */
	unsigned int rxdes2;	/* RXBUF_BADR */
	unsigned int rxdes3;	/* not used by HW — presumably driver-private storage; confirm in ftmac100.c */
} __attribute__ ((aligned(16)));
164
165#define FTMAC100_RXDES0_RFL 0x7ff
166#define FTMAC100_RXDES0_MULTICAST (1 << 16)
167#define FTMAC100_RXDES0_BROADCAST (1 << 17)
168#define FTMAC100_RXDES0_RX_ERR (1 << 18)
169#define FTMAC100_RXDES0_CRC_ERR (1 << 19)
170#define FTMAC100_RXDES0_FTL (1 << 20)
171#define FTMAC100_RXDES0_RUNT (1 << 21)
172#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22)
173#define FTMAC100_RXDES0_LRS (1 << 28)
174#define FTMAC100_RXDES0_FRS (1 << 29)
175#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31)
176
177#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff)
178#define FTMAC100_RXDES1_EDORR (1 << 31)
179
180#endif /* __FTMAC100_H */
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index ff46c91520af..92e11da25749 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -110,6 +110,7 @@
110/* Management Control */ 110/* Management Control */
111#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ 111#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
112#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ 112#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
113#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
113/* Enable Neighbor Discovery Filtering */ 114/* Enable Neighbor Discovery Filtering */
114#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 115#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
115#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 116#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 281324e85980..eec9ed735588 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -248,6 +248,10 @@ struct e1000_hw_stats {
248 u64 scvpc; 248 u64 scvpc;
249 u64 hrmpc; 249 u64 hrmpc;
250 u64 doosync; 250 u64 doosync;
251 u64 o2bgptc;
252 u64 o2bspc;
253 u64 b2ospc;
254 u64 b2ogprc;
251}; 255};
252 256
253struct e1000_phy_stats { 257struct e1000_phy_stats {
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 3a6f8471aea2..61713548c027 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -328,4 +328,11 @@
328 328
329/* DMA Coalescing registers */ 329/* DMA Coalescing registers */
330#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ 330#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
331
332/* OS2BMC Registers */
333#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
334#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
335#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
336#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
337
331#endif 338#endif
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a70e16bcfa7e..78d420b4b2db 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -86,6 +86,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
86 IGB_STAT("tx_smbus", stats.mgptc), 86 IGB_STAT("tx_smbus", stats.mgptc),
87 IGB_STAT("rx_smbus", stats.mgprc), 87 IGB_STAT("rx_smbus", stats.mgprc),
88 IGB_STAT("dropped_smbus", stats.mgpdc), 88 IGB_STAT("dropped_smbus", stats.mgpdc),
89 IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
90 IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
91 IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
92 IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
89}; 93};
90 94
91#define IGB_NETDEV_STAT(_net_stat) { \ 95#define IGB_NETDEV_STAT(_net_stat) { \
@@ -603,7 +607,10 @@ static void igb_get_regs(struct net_device *netdev,
603 regs_buff[548] = rd32(E1000_TDFT); 607 regs_buff[548] = rd32(E1000_TDFT);
604 regs_buff[549] = rd32(E1000_TDFHS); 608 regs_buff[549] = rd32(E1000_TDFHS);
605 regs_buff[550] = rd32(E1000_TDFPC); 609 regs_buff[550] = rd32(E1000_TDFPC);
606 610 regs_buff[551] = adapter->stats.o2bgptc;
611 regs_buff[552] = adapter->stats.b2ospc;
612 regs_buff[553] = adapter->stats.o2bspc;
613 regs_buff[554] = adapter->stats.b2ogprc;
607} 614}
608 615
609static int igb_get_eeprom_len(struct net_device *netdev) 616static int igb_get_eeprom_len(struct net_device *netdev)
@@ -727,8 +734,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
727 char firmware_version[32]; 734 char firmware_version[32];
728 u16 eeprom_data; 735 u16 eeprom_data;
729 736
730 strncpy(drvinfo->driver, igb_driver_name, 32); 737 strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
731 strncpy(drvinfo->version, igb_driver_version, 32); 738 strncpy(drvinfo->version, igb_driver_version,
739 sizeof(drvinfo->version) - 1);
732 740
733 /* EEPROM image version # is reported as firmware version # for 741 /* EEPROM image version # is reported as firmware version # for
734 * 82575 controllers */ 742 * 82575 controllers */
@@ -738,8 +746,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
738 (eeprom_data & 0x0FF0) >> 4, 746 (eeprom_data & 0x0FF0) >> 4,
739 eeprom_data & 0x000F); 747 eeprom_data & 0x000F);
740 748
741 strncpy(drvinfo->fw_version, firmware_version, 32); 749 strncpy(drvinfo->fw_version, firmware_version,
742 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 750 sizeof(drvinfo->fw_version) - 1);
751 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
752 sizeof(drvinfo->bus_info) - 1);
743 drvinfo->n_stats = IGB_STATS_LEN; 753 drvinfo->n_stats = IGB_STATS_LEN;
744 drvinfo->testinfo_len = IGB_TEST_LEN; 754 drvinfo->testinfo_len = IGB_TEST_LEN;
745 drvinfo->regdump_len = igb_get_regs_len(netdev); 755 drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -1070,7 +1080,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1070 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1080 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1071 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 1081 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
1072 wr32(reg, (_test[pat] & write)); 1082 wr32(reg, (_test[pat] & write));
1073 val = rd32(reg); 1083 val = rd32(reg) & mask;
1074 if (val != (_test[pat] & write & mask)) { 1084 if (val != (_test[pat] & write & mask)) {
1075 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 1085 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
1076 "failed: got 0x%08X expected 0x%08X\n", 1086 "failed: got 0x%08X expected 0x%08X\n",
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 579dbba5f9e4..3666b967846a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2291,7 +2291,12 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2291 switch (hw->mac.type) { 2291 switch (hw->mac.type) {
2292 case e1000_82576: 2292 case e1000_82576:
2293 case e1000_i350: 2293 case e1000_i350:
2294 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; 2294 if (max_vfs > 7) {
2295 dev_warn(&pdev->dev,
2296 "Maximum of 7 VFs per PF, using max\n");
2297 adapter->vfs_allocated_count = 7;
2298 } else
2299 adapter->vfs_allocated_count = max_vfs;
2295 break; 2300 break;
2296 default: 2301 default:
2297 break; 2302 break;
@@ -4555,6 +4560,15 @@ void igb_update_stats(struct igb_adapter *adapter,
4555 adapter->stats.mgptc += rd32(E1000_MGTPTC); 4560 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4556 adapter->stats.mgprc += rd32(E1000_MGTPRC); 4561 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4557 adapter->stats.mgpdc += rd32(E1000_MGTPDC); 4562 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4563
4564 /* OS2BMC Stats */
4565 reg = rd32(E1000_MANC);
4566 if (reg & E1000_MANC_EN_BMC2OS) {
4567 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4568 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4569 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4570 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4571 }
4558} 4572}
4559 4573
4560static irqreturn_t igb_msix_other(int irq, void *data) 4574static irqreturn_t igb_msix_other(int irq, void *data)
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index ed6e3d910247..1d943aa7c7a6 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
201 struct igbvf_adapter *adapter = netdev_priv(netdev); 201 struct igbvf_adapter *adapter = netdev_priv(netdev);
202 struct e1000_hw *hw = &adapter->hw; 202 struct e1000_hw *hw = &adapter->hw;
203 u32 *regs_buff = p; 203 u32 *regs_buff = p;
204 u8 revision_id;
205 204
206 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); 205 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
207 206
208 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 207 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
209 208 adapter->pdev->device;
210 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
211 209
212 regs_buff[0] = er32(CTRL); 210 regs_buff[0] = er32(CTRL);
213 regs_buff[1] = er32(STATUS); 211 regs_buff[1] = er32(STATUS);
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 42fdf5977be9..6ccc32fd7338 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2639,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2639 hw->device_id = pdev->device; 2639 hw->device_id = pdev->device;
2640 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2640 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2641 hw->subsystem_device_id = pdev->subsystem_device; 2641 hw->subsystem_device_id = pdev->subsystem_device;
2642 2642 hw->revision_id = pdev->revision;
2643 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2644 2643
2645 err = -EIO; 2644 err = -EIO;
2646 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), 2645 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8b009a..af3822f9ea9a 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
220 * The parameter rar_count will usually be hw->mac.rar_entry_count 220 * The parameter rar_count will usually be hw->mac.rar_entry_count
221 * unless there are workarounds that change this. 221 * unless there are workarounds that change this.
222 **/ 222 **/
223void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 223static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
224 u8 *mc_addr_list, u32 mc_addr_count, 224 u8 *mc_addr_list, u32 mc_addr_count,
225 u32 rar_used_count, u32 rar_count) 225 u32 rar_used_count, u32 rar_count)
226{ 226{
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index aa93655c3aa7..a5b0f0e194bb 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
2025 2025
2026 if (phyaddr != 0x1f) { 2026 if (phyaddr != 0x1f) {
2027 u16 mii_phyctrl, mii_1000cr; 2027 u16 mii_phyctrl, mii_1000cr;
2028 u8 revisionid = 0;
2029 2028
2030 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); 2029 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2031 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | 2030 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
2035 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); 2034 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2036 2035
2037 /* Set default phyparam */ 2036 /* Set default phyparam */
2038 pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); 2037 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2039 ipg_set_phy_default_param(revisionid, dev, phyaddr);
2040 2038
2041 /* Reset PHY */ 2039 /* Reset PHY */
2042 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; 2040 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 12769b58c2e7..1e546fc127d0 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -552,6 +552,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
552 struct sk_buff *skb); 552 struct sk_buff *skb);
553extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 553extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
554 struct scatterlist *sgl, unsigned int sgc); 554 struct scatterlist *sgl, unsigned int sgc);
555extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
556 struct scatterlist *sgl, unsigned int sgc);
555extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); 557extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
556extern int ixgbe_fcoe_enable(struct net_device *netdev); 558extern int ixgbe_fcoe_enable(struct net_device *netdev);
557extern int ixgbe_fcoe_disable(struct net_device *netdev); 559extern int ixgbe_fcoe_disable(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index d0f1d9d2c416..ff23907bde0c 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -280,10 +280,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
280{ 280{
281 enum ixgbe_media_type media_type; 281 enum ixgbe_media_type media_type;
282 282
283 /* Detect if there is a copper PHY attached. */
284 switch (hw->phy.type) {
285 case ixgbe_phy_cu_unknown:
286 case ixgbe_phy_tn:
287 case ixgbe_phy_aq:
288 media_type = ixgbe_media_type_copper;
289 goto out;
290 default:
291 break;
292 }
293
283 /* Media type for I82598 is based on device ID */ 294 /* Media type for I82598 is based on device ID */
284 switch (hw->device_id) { 295 switch (hw->device_id) {
285 case IXGBE_DEV_ID_82598: 296 case IXGBE_DEV_ID_82598:
286 case IXGBE_DEV_ID_82598_BX: 297 case IXGBE_DEV_ID_82598_BX:
298 /* Default device ID is mezzanine card KX/KX4 */
287 media_type = ixgbe_media_type_backplane; 299 media_type = ixgbe_media_type_backplane;
288 break; 300 break;
289 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 301 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
@@ -306,7 +318,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
306 media_type = ixgbe_media_type_unknown; 318 media_type = ixgbe_media_type_unknown;
307 break; 319 break;
308 } 320 }
309 321out:
310 return media_type; 322 return media_type;
311} 323}
312 324
@@ -354,7 +366,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
354 366
355 /* Negotiate the fc mode to use */ 367 /* Negotiate the fc mode to use */
356 ret_val = ixgbe_fc_autoneg(hw); 368 ret_val = ixgbe_fc_autoneg(hw);
357 if (ret_val) 369 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
358 goto out; 370 goto out;
359 371
360 /* Disable any previous flow control settings */ 372 /* Disable any previous flow control settings */
@@ -372,10 +384,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
372 * 2: Tx flow control is enabled (we can send pause frames but 384 * 2: Tx flow control is enabled (we can send pause frames but
373 * we do not support receiving pause frames). 385 * we do not support receiving pause frames).
374 * 3: Both Rx and Tx flow control (symmetric) are enabled. 386 * 3: Both Rx and Tx flow control (symmetric) are enabled.
375 * other: Invalid.
376#ifdef CONFIG_DCB 387#ifdef CONFIG_DCB
377 * 4: Priority Flow Control is enabled. 388 * 4: Priority Flow Control is enabled.
378#endif 389#endif
390 * other: Invalid.
379 */ 391 */
380 switch (hw->fc.current_mode) { 392 switch (hw->fc.current_mode) {
381 case ixgbe_fc_none: 393 case ixgbe_fc_none:
@@ -432,9 +444,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
432 reg = (rx_pba_size - hw->fc.low_water) << 6; 444 reg = (rx_pba_size - hw->fc.low_water) << 6;
433 if (hw->fc.send_xon) 445 if (hw->fc.send_xon)
434 reg |= IXGBE_FCRTL_XONE; 446 reg |= IXGBE_FCRTL_XONE;
447
435 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); 448 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
436 449
437 reg = (rx_pba_size - hw->fc.high_water) << 10; 450 reg = (rx_pba_size - hw->fc.high_water) << 6;
438 reg |= IXGBE_FCRTH_FCEN; 451 reg |= IXGBE_FCRTH_FCEN;
439 452
440 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); 453 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
@@ -627,13 +640,12 @@ out:
627 return 0; 640 return 0;
628} 641}
629 642
630
631/** 643/**
632 * ixgbe_setup_mac_link_82598 - Set MAC link speed 644 * ixgbe_setup_mac_link_82598 - Set MAC link speed
633 * @hw: pointer to hardware structure 645 * @hw: pointer to hardware structure
634 * @speed: new link speed 646 * @speed: new link speed
635 * @autoneg: true if auto-negotiation enabled 647 * @autoneg: true if auto-negotiation enabled
636 * @autoneg_wait_to_complete: true if waiting is needed to complete 648 * @autoneg_wait_to_complete: true when waiting for completion is needed
637 * 649 *
638 * Set the link speed in the AUTOC register and restarts link. 650 * Set the link speed in the AUTOC register and restarts link.
639 **/ 651 **/
@@ -672,7 +684,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
672 * ixgbe_hw This will write the AUTOC register based on the new 684 * ixgbe_hw This will write the AUTOC register based on the new
673 * stored values 685 * stored values
674 */ 686 */
675 status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 687 status = ixgbe_start_mac_link_82598(hw,
688 autoneg_wait_to_complete);
676 } 689 }
677 690
678 return status; 691 return status;
@@ -698,7 +711,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
698 /* Setup the PHY according to input speed */ 711 /* Setup the PHY according to input speed */
699 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 712 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
700 autoneg_wait_to_complete); 713 autoneg_wait_to_complete);
701
702 /* Set up MAC */ 714 /* Set up MAC */
703 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 715 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
704 716
@@ -770,7 +782,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
770 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) 782 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
771 goto no_phy_reset; 783 goto no_phy_reset;
772 784
773
774 hw->phy.ops.reset(hw); 785 hw->phy.ops.reset(hw);
775 } 786 }
776 787
@@ -779,12 +790,9 @@ no_phy_reset:
779 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 790 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
780 * access and verify no pending requests before reset 791 * access and verify no pending requests before reset
781 */ 792 */
782 status = ixgbe_disable_pcie_master(hw); 793 ixgbe_disable_pcie_master(hw);
783 if (status != 0) {
784 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
785 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
786 }
787 794
795mac_reset_top:
788 /* 796 /*
789 * Issue global reset to the MAC. This needs to be a SW reset. 797 * Issue global reset to the MAC. This needs to be a SW reset.
790 * If link reset is used, it might reset the MAC when mng is using it 798 * If link reset is used, it might reset the MAC when mng is using it
@@ -805,6 +813,19 @@ no_phy_reset:
805 hw_dbg(hw, "Reset polling failed to complete.\n"); 813 hw_dbg(hw, "Reset polling failed to complete.\n");
806 } 814 }
807 815
816 /*
817 * Double resets are required for recovery from certain error
818 * conditions. Between resets, it is necessary to stall to allow time
819 * for any pending HW events to complete. We use 1usec since that is
820 * what is needed for ixgbe_disable_pcie_master(). The second reset
821 * then clears out any effects of those events.
822 */
823 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
824 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
825 udelay(1);
826 goto mac_reset_top;
827 }
828
808 msleep(50); 829 msleep(50);
809 830
810 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); 831 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -824,15 +845,15 @@ no_phy_reset:
824 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); 845 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
825 } 846 }
826 847
848 /* Store the permanent mac address */
849 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
850
827 /* 851 /*
828 * Store MAC address from RAR0, clear receive address registers, and 852 * Store MAC address from RAR0, clear receive address registers, and
829 * clear the multicast table 853 * clear the multicast table
830 */ 854 */
831 hw->mac.ops.init_rx_addrs(hw); 855 hw->mac.ops.init_rx_addrs(hw);
832 856
833 /* Store the permanent mac address */
834 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
835
836reset_hw_out: 857reset_hw_out:
837 if (phy_status) 858 if (phy_status)
838 status = phy_status; 859 status = phy_status;
@@ -849,6 +870,13 @@ reset_hw_out:
849static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 870static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
850{ 871{
851 u32 rar_high; 872 u32 rar_high;
873 u32 rar_entries = hw->mac.num_rar_entries;
874
875 /* Make sure we are using a valid rar index range */
876 if (rar >= rar_entries) {
877 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
878 return IXGBE_ERR_INVALID_ARGUMENT;
879 }
852 880
853 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 881 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
854 rar_high &= ~IXGBE_RAH_VIND_MASK; 882 rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -868,14 +896,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
868 u32 rar_high; 896 u32 rar_high;
869 u32 rar_entries = hw->mac.num_rar_entries; 897 u32 rar_entries = hw->mac.num_rar_entries;
870 898
871 if (rar < rar_entries) { 899
872 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 900 /* Make sure we are using a valid rar index range */
873 if (rar_high & IXGBE_RAH_VIND_MASK) { 901 if (rar >= rar_entries) {
874 rar_high &= ~IXGBE_RAH_VIND_MASK;
875 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
876 }
877 } else {
878 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 902 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
903 return IXGBE_ERR_INVALID_ARGUMENT;
904 }
905
906 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
907 if (rar_high & IXGBE_RAH_VIND_MASK) {
908 rar_high &= ~IXGBE_RAH_VIND_MASK;
909 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
879 } 910 }
880 911
881 return 0; 912 return 0;
@@ -994,13 +1025,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
994} 1025}
995 1026
996/** 1027/**
997 * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module 1028 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
998 * over I2C interface through an intermediate phy.
999 * @hw: pointer to hardware structure 1029 * @hw: pointer to hardware structure
1000 * @byte_offset: EEPROM byte offset to read 1030 * @byte_offset: EEPROM byte offset to read
1001 * @eeprom_data: value read 1031 * @eeprom_data: value read
1002 * 1032 *
1003 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1033 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1004 **/ 1034 **/
1005static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 1035static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1006 u8 *eeprom_data) 1036 u8 *eeprom_data)
@@ -1074,10 +1104,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1074 1104
1075 /* Copper PHY must be checked before AUTOC LMS to determine correct 1105 /* Copper PHY must be checked before AUTOC LMS to determine correct
1076 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ 1106 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1077 if (hw->phy.type == ixgbe_phy_tn || 1107 switch (hw->phy.type) {
1078 hw->phy.type == ixgbe_phy_cu_unknown) { 1108 case ixgbe_phy_tn:
1079 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1109 case ixgbe_phy_aq:
1080 &ext_ability); 1110 case ixgbe_phy_cu_unknown:
1111 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
1112 MDIO_MMD_PMAPMD, &ext_ability);
1081 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1113 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1082 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1114 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1083 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1115 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1085,6 +1117,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1085 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1117 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1086 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1118 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1087 goto out; 1119 goto out;
1120 default:
1121 break;
1088 } 1122 }
1089 1123
1090 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1124 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1179,13 +1213,14 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1179 .set_vmdq = &ixgbe_set_vmdq_82598, 1213 .set_vmdq = &ixgbe_set_vmdq_82598,
1180 .clear_vmdq = &ixgbe_clear_vmdq_82598, 1214 .clear_vmdq = &ixgbe_clear_vmdq_82598,
1181 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 1215 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1182 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1183 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 1216 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
1184 .enable_mc = &ixgbe_enable_mc_generic, 1217 .enable_mc = &ixgbe_enable_mc_generic,
1185 .disable_mc = &ixgbe_disable_mc_generic, 1218 .disable_mc = &ixgbe_disable_mc_generic,
1186 .clear_vfta = &ixgbe_clear_vfta_82598, 1219 .clear_vfta = &ixgbe_clear_vfta_82598,
1187 .set_vfta = &ixgbe_set_vfta_82598, 1220 .set_vfta = &ixgbe_set_vfta_82598,
1188 .fc_enable = &ixgbe_fc_enable_82598, 1221 .fc_enable = &ixgbe_fc_enable_82598,
1222 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1223 .release_swfw_sync = &ixgbe_release_swfw_sync,
1189}; 1224};
1190 1225
1191static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1226static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a21f5817685b..00aeba385a2f 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -112,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
112 goto setup_sfp_out; 112 goto setup_sfp_out;
113 113
114 /* PHY config will finish before releasing the semaphore */ 114 /* PHY config will finish before releasing the semaphore */
115 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 115 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
116 IXGBE_GSSR_MAC_CSR_SM);
116 if (ret_val != 0) { 117 if (ret_val != 0) {
117 ret_val = IXGBE_ERR_SWFW_SYNC; 118 ret_val = IXGBE_ERR_SWFW_SYNC;
118 goto setup_sfp_out; 119 goto setup_sfp_out;
@@ -329,11 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
329 enum ixgbe_media_type media_type; 330 enum ixgbe_media_type media_type;
330 331
331 /* Detect if there is a copper PHY attached. */ 332 /* Detect if there is a copper PHY attached. */
332 if (hw->phy.type == ixgbe_phy_cu_unknown || 333 switch (hw->phy.type) {
333 hw->phy.type == ixgbe_phy_tn || 334 case ixgbe_phy_cu_unknown:
334 hw->phy.type == ixgbe_phy_aq) { 335 case ixgbe_phy_tn:
336 case ixgbe_phy_aq:
335 media_type = ixgbe_media_type_copper; 337 media_type = ixgbe_media_type_copper;
336 goto out; 338 goto out;
339 default:
340 break;
337 } 341 }
338 342
339 switch (hw->device_id) { 343 switch (hw->device_id) {
@@ -354,6 +358,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
354 case IXGBE_DEV_ID_82599_CX4: 358 case IXGBE_DEV_ID_82599_CX4:
355 media_type = ixgbe_media_type_cx4; 359 media_type = ixgbe_media_type_cx4;
356 break; 360 break;
361 case IXGBE_DEV_ID_82599_T3_LOM:
362 media_type = ixgbe_media_type_copper;
363 break;
357 default: 364 default:
358 media_type = ixgbe_media_type_unknown; 365 media_type = ixgbe_media_type_unknown;
359 break; 366 break;
@@ -411,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
411 return status; 418 return status;
412} 419}
413 420
414 /** 421/**
415 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 422 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
416 * @hw: pointer to hardware structure 423 * @hw: pointer to hardware structure
417 * 424 *
418 * The base drivers may require better control over SFP+ module 425 * The base drivers may require better control over SFP+ module
419 * PHY states. This includes selectively shutting down the Tx 426 * PHY states. This includes selectively shutting down the Tx
420 * laser on the PHY, effectively halting physical link. 427 * laser on the PHY, effectively halting physical link.
421 **/ 428 **/
422static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 429static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
423{ 430{
424 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 431 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -463,8 +470,6 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
463 **/ 470 **/
464static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 471static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
465{ 472{
466 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
467
468 if (hw->mac.autotry_restart) { 473 if (hw->mac.autotry_restart) {
469 ixgbe_disable_tx_laser_multispeed_fiber(hw); 474 ixgbe_disable_tx_laser_multispeed_fiber(hw);
470 ixgbe_enable_tx_laser_multispeed_fiber(hw); 475 ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -487,17 +492,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
487 bool autoneg_wait_to_complete) 492 bool autoneg_wait_to_complete)
488{ 493{
489 s32 status = 0; 494 s32 status = 0;
490 ixgbe_link_speed phy_link_speed; 495 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
491 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 496 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
492 u32 speedcnt = 0; 497 u32 speedcnt = 0;
493 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 498 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
499 u32 i = 0;
494 bool link_up = false; 500 bool link_up = false;
495 bool negotiation; 501 bool negotiation;
496 int i;
497 502
498 /* Mask off requested but non-supported speeds */ 503 /* Mask off requested but non-supported speeds */
499 hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation); 504 status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
500 speed &= phy_link_speed; 505 &negotiation);
506 if (status != 0)
507 return status;
508
509 speed &= link_speed;
501 510
502 /* 511 /*
503 * Try each speed one by one, highest priority first. We do this in 512 * Try each speed one by one, highest priority first. We do this in
@@ -508,9 +517,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
508 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 517 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
509 518
510 /* If we already have link at this speed, just jump out */ 519 /* If we already have link at this speed, just jump out */
511 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 520 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
521 false);
522 if (status != 0)
523 return status;
512 524
513 if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) 525 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
514 goto out; 526 goto out;
515 527
516 /* Set the module link speed */ 528 /* Set the module link speed */
@@ -522,9 +534,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
522 msleep(40); 534 msleep(40);
523 535
524 status = ixgbe_setup_mac_link_82599(hw, 536 status = ixgbe_setup_mac_link_82599(hw,
525 IXGBE_LINK_SPEED_10GB_FULL, 537 IXGBE_LINK_SPEED_10GB_FULL,
526 autoneg, 538 autoneg,
527 autoneg_wait_to_complete); 539 autoneg_wait_to_complete);
528 if (status != 0) 540 if (status != 0)
529 return status; 541 return status;
530 542
@@ -536,14 +548,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
536 * Section 73.10.2, we may have to wait up to 500ms if KR is 548 * Section 73.10.2, we may have to wait up to 500ms if KR is
537 * attempted. 82599 uses the same timing for 10g SFI. 549 * attempted. 82599 uses the same timing for 10g SFI.
538 */ 550 */
539
540 for (i = 0; i < 5; i++) { 551 for (i = 0; i < 5; i++) {
541 /* Wait for the link partner to also set speed */ 552 /* Wait for the link partner to also set speed */
542 msleep(100); 553 msleep(100);
543 554
544 /* If we have link, just jump out */ 555 /* If we have link, just jump out */
545 hw->mac.ops.check_link(hw, &phy_link_speed, 556 status = hw->mac.ops.check_link(hw, &link_speed,
546 &link_up, false); 557 &link_up, false);
558 if (status != 0)
559 return status;
560
547 if (link_up) 561 if (link_up)
548 goto out; 562 goto out;
549 } 563 }
@@ -555,9 +569,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
555 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 569 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
556 570
557 /* If we already have link at this speed, just jump out */ 571 /* If we already have link at this speed, just jump out */
558 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 572 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
573 false);
574 if (status != 0)
575 return status;
559 576
560 if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) 577 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
561 goto out; 578 goto out;
562 579
563 /* Set the module link speed */ 580 /* Set the module link speed */
@@ -570,9 +587,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
570 msleep(40); 587 msleep(40);
571 588
572 status = ixgbe_setup_mac_link_82599(hw, 589 status = ixgbe_setup_mac_link_82599(hw,
573 IXGBE_LINK_SPEED_1GB_FULL, 590 IXGBE_LINK_SPEED_1GB_FULL,
574 autoneg, 591 autoneg,
575 autoneg_wait_to_complete); 592 autoneg_wait_to_complete);
576 if (status != 0) 593 if (status != 0)
577 return status; 594 return status;
578 595
@@ -583,7 +600,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
583 msleep(100); 600 msleep(100);
584 601
585 /* If we have link, just jump out */ 602 /* If we have link, just jump out */
586 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 603 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
604 false);
605 if (status != 0)
606 return status;
607
587 if (link_up) 608 if (link_up)
588 goto out; 609 goto out;
589 } 610 }
@@ -626,13 +647,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
626 bool autoneg_wait_to_complete) 647 bool autoneg_wait_to_complete)
627{ 648{
628 s32 status = 0; 649 s32 status = 0;
629 ixgbe_link_speed link_speed; 650 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
630 s32 i, j; 651 s32 i, j;
631 bool link_up = false; 652 bool link_up = false;
632 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 653 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
633 struct ixgbe_adapter *adapter = hw->back;
634
635 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
636 654
637 /* Set autoneg_advertised value based on input link speed */ 655 /* Set autoneg_advertised value based on input link speed */
638 hw->phy.autoneg_advertised = 0; 656 hw->phy.autoneg_advertised = 0;
@@ -658,7 +676,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
658 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { 676 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
659 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 677 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
660 autoneg_wait_to_complete); 678 autoneg_wait_to_complete);
661 if (status) 679 if (status != 0)
662 goto out; 680 goto out;
663 681
664 /* 682 /*
@@ -671,8 +689,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
671 mdelay(100); 689 mdelay(100);
672 690
673 /* If we have link, just jump out */ 691 /* If we have link, just jump out */
674 hw->mac.ops.check_link(hw, &link_speed, 692 status = hw->mac.ops.check_link(hw, &link_speed,
675 &link_up, false); 693 &link_up, false);
694 if (status != 0)
695 goto out;
696
676 if (link_up) 697 if (link_up)
677 goto out; 698 goto out;
678 } 699 }
@@ -690,7 +711,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
690 hw->phy.smart_speed_active = true; 711 hw->phy.smart_speed_active = true;
691 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 712 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
692 autoneg_wait_to_complete); 713 autoneg_wait_to_complete);
693 if (status) 714 if (status != 0)
694 goto out; 715 goto out;
695 716
696 /* 717 /*
@@ -703,8 +724,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
703 mdelay(100); 724 mdelay(100);
704 725
705 /* If we have link, just jump out */ 726 /* If we have link, just jump out */
706 hw->mac.ops.check_link(hw, &link_speed, 727 status = hw->mac.ops.check_link(hw, &link_speed,
707 &link_up, false); 728 &link_up, false);
729 if (status != 0)
730 goto out;
731
708 if (link_up) 732 if (link_up)
709 goto out; 733 goto out;
710 } 734 }
@@ -716,7 +740,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
716 740
717out: 741out:
718 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 742 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
719 e_info(hw, "Smartspeed has downgraded the link speed from " 743 hw_dbg(hw, "Smartspeed has downgraded the link speed from "
720 "the maximum advertised\n"); 744 "the maximum advertised\n");
721 return status; 745 return status;
722} 746}
@@ -748,6 +772,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
748 772
749 /* Check to see if speed passed in is supported. */ 773 /* Check to see if speed passed in is supported. */
750 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); 774 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
775 if (status != 0)
776 goto out;
777
751 speed &= link_capabilities; 778 speed &= link_capabilities;
752 779
753 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 780 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
@@ -761,7 +788,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
761 else 788 else
762 orig_autoc = autoc; 789 orig_autoc = autoc;
763 790
764
765 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 791 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
766 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 792 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
767 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 793 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -878,7 +904,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
878 904
879 /* PHY ops must be identified and initialized prior to reset */ 905 /* PHY ops must be identified and initialized prior to reset */
880 906
881 /* Init PHY and function pointers, perform SFP setup */ 907 /* Identify PHY and related function pointers */
882 status = hw->phy.ops.init(hw); 908 status = hw->phy.ops.init(hw);
883 909
884 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 910 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -890,6 +916,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
890 hw->phy.sfp_setup_needed = false; 916 hw->phy.sfp_setup_needed = false;
891 } 917 }
892 918
919 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
920 goto reset_hw_out;
921
893 /* Reset PHY */ 922 /* Reset PHY */
894 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 923 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
895 hw->phy.ops.reset(hw); 924 hw->phy.ops.reset(hw);
@@ -898,12 +927,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
898 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 927 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
899 * access and verify no pending requests before reset 928 * access and verify no pending requests before reset
900 */ 929 */
901 status = ixgbe_disable_pcie_master(hw); 930 ixgbe_disable_pcie_master(hw);
902 if (status != 0) {
903 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
904 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
905 }
906 931
932mac_reset_top:
907 /* 933 /*
908 * Issue global reset to the MAC. This needs to be a SW reset. 934 * Issue global reset to the MAC. This needs to be a SW reset.
909 * If link reset is used, it might reset the MAC when mng is using it 935 * If link reset is used, it might reset the MAC when mng is using it
@@ -924,6 +950,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
924 hw_dbg(hw, "Reset polling failed to complete.\n"); 950 hw_dbg(hw, "Reset polling failed to complete.\n");
925 } 951 }
926 952
953 /*
954 * Double resets are required for recovery from certain error
955 * conditions. Between resets, it is necessary to stall to allow time
956 * for any pending HW events to complete. We use 1usec since that is
957 * what is needed for ixgbe_disable_pcie_master(). The second reset
958 * then clears out any effects of those events.
959 */
960 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
961 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
962 udelay(1);
963 goto mac_reset_top;
964 }
965
927 msleep(50); 966 msleep(50);
928 967
929 /* 968 /*
@@ -951,6 +990,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
951 } 990 }
952 } 991 }
953 992
993 /* Store the permanent mac address */
994 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
995
954 /* 996 /*
955 * Store MAC address from RAR0, clear receive address registers, and 997 * Store MAC address from RAR0, clear receive address registers, and
956 * clear the multicast table. Also reset num_rar_entries to 128, 998 * clear the multicast table. Also reset num_rar_entries to 128,
@@ -959,9 +1001,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
959 hw->mac.num_rar_entries = 128; 1001 hw->mac.num_rar_entries = 128;
960 hw->mac.ops.init_rx_addrs(hw); 1002 hw->mac.ops.init_rx_addrs(hw);
961 1003
962 /* Store the permanent mac address */
963 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
964
965 /* Store the permanent SAN mac address */ 1004 /* Store the permanent SAN mac address */
966 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 1005 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
967 1006
@@ -1733,13 +1772,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1733 * @hw: pointer to hardware structure 1772 * @hw: pointer to hardware structure
1734 * 1773 *
1735 * Determines the physical layer module found on the current adapter. 1774 * Determines the physical layer module found on the current adapter.
1775 * If PHY already detected, maintains current PHY type in hw struct,
1776 * otherwise executes the PHY detection routine.
1736 **/ 1777 **/
1737static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1778s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1738{ 1779{
1739 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1780 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1781
1782 /* Detect PHY if not unknown - returns success if already detected. */
1740 status = ixgbe_identify_phy_generic(hw); 1783 status = ixgbe_identify_phy_generic(hw);
1741 if (status != 0) 1784 if (status != 0) {
1742 status = ixgbe_identify_sfp_module_generic(hw); 1785 /* 82599 10GBASE-T requires an external PHY */
1786 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1787 goto out;
1788 else
1789 status = ixgbe_identify_sfp_module_generic(hw);
1790 }
1791
1792 /* Set PHY type none if no PHY detected */
1793 if (hw->phy.type == ixgbe_phy_unknown) {
1794 hw->phy.type = ixgbe_phy_none;
1795 status = 0;
1796 }
1797
1798 /* Return error if SFP module has been detected but is not supported */
1799 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1800 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1801
1802out:
1743 return status; 1803 return status;
1744} 1804}
1745 1805
@@ -1763,11 +1823,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1763 1823
1764 hw->phy.ops.identify(hw); 1824 hw->phy.ops.identify(hw);
1765 1825
1766 if (hw->phy.type == ixgbe_phy_tn || 1826 switch (hw->phy.type) {
1767 hw->phy.type == ixgbe_phy_aq || 1827 case ixgbe_phy_tn:
1768 hw->phy.type == ixgbe_phy_cu_unknown) { 1828 case ixgbe_phy_aq:
1829 case ixgbe_phy_cu_unknown:
1769 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1830 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
1770 &ext_ability); 1831 &ext_ability);
1771 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1832 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1772 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1833 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1773 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1834 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1775,6 +1836,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1775 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1836 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1776 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1837 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1777 goto out; 1838 goto out;
1839 default:
1840 break;
1778 } 1841 }
1779 1842
1780 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1843 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1886,6 +1949,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1886 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 1949 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
1887 break; 1950 break;
1888 else 1951 else
1952 /* Use interrupt-safe sleep just in case */
1889 udelay(10); 1953 udelay(10);
1890 } 1954 }
1891 1955
@@ -1995,7 +2059,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
1995 .set_vmdq = &ixgbe_set_vmdq_generic, 2059 .set_vmdq = &ixgbe_set_vmdq_generic,
1996 .clear_vmdq = &ixgbe_clear_vmdq_generic, 2060 .clear_vmdq = &ixgbe_clear_vmdq_generic,
1997 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 2061 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1998 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1999 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 2062 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
2000 .enable_mc = &ixgbe_enable_mc_generic, 2063 .enable_mc = &ixgbe_enable_mc_generic,
2001 .disable_mc = &ixgbe_disable_mc_generic, 2064 .disable_mc = &ixgbe_disable_mc_generic,
@@ -2006,31 +2069,34 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2006 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2069 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2007 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 2070 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
2008 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2071 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2072 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2073 .release_swfw_sync = &ixgbe_release_swfw_sync,
2074
2009}; 2075};
2010 2076
2011static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2077static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2012 .init_params = &ixgbe_init_eeprom_params_generic, 2078 .init_params = &ixgbe_init_eeprom_params_generic,
2013 .read = &ixgbe_read_eerd_generic, 2079 .read = &ixgbe_read_eerd_generic,
2014 .write = &ixgbe_write_eeprom_generic, 2080 .write = &ixgbe_write_eeprom_generic,
2015 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, 2081 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2016 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2082 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2017 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2083 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2018}; 2084};
2019 2085
2020static struct ixgbe_phy_operations phy_ops_82599 = { 2086static struct ixgbe_phy_operations phy_ops_82599 = {
2021 .identify = &ixgbe_identify_phy_82599, 2087 .identify = &ixgbe_identify_phy_82599,
2022 .identify_sfp = &ixgbe_identify_sfp_module_generic, 2088 .identify_sfp = &ixgbe_identify_sfp_module_generic,
2023 .init = &ixgbe_init_phy_ops_82599, 2089 .init = &ixgbe_init_phy_ops_82599,
2024 .reset = &ixgbe_reset_phy_generic, 2090 .reset = &ixgbe_reset_phy_generic,
2025 .read_reg = &ixgbe_read_phy_reg_generic, 2091 .read_reg = &ixgbe_read_phy_reg_generic,
2026 .write_reg = &ixgbe_write_phy_reg_generic, 2092 .write_reg = &ixgbe_write_phy_reg_generic,
2027 .setup_link = &ixgbe_setup_phy_link_generic, 2093 .setup_link = &ixgbe_setup_phy_link_generic,
2028 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 2094 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
2029 .read_i2c_byte = &ixgbe_read_i2c_byte_generic, 2095 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
2030 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2096 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
2031 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2097 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
2032 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2098 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
2033 .check_overtemp = &ixgbe_tn_check_overtemp, 2099 .check_overtemp = &ixgbe_tn_check_overtemp,
2034}; 2100};
2035 2101
2036struct ixgbe_info ixgbe_82599_info = { 2102struct ixgbe_info ixgbe_82599_info = {
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index ebbda7d15254..bcd952916eb2 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -46,10 +46,13 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 48
49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 50static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
51static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
52static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
53static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
53static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
54 57
55/** 58/**
@@ -139,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
139 IXGBE_READ_REG(hw, IXGBE_MRFC); 142 IXGBE_READ_REG(hw, IXGBE_MRFC);
140 IXGBE_READ_REG(hw, IXGBE_RLEC); 143 IXGBE_READ_REG(hw, IXGBE_RLEC);
141 IXGBE_READ_REG(hw, IXGBE_LXONTXC); 144 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
142 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
143 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 145 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
144 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 146 if (hw->mac.type >= ixgbe_mac_82599EB) {
147 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
148 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
149 } else {
150 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
151 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
152 }
145 153
146 for (i = 0; i < 8; i++) { 154 for (i = 0; i < 8; i++) {
147 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 155 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
148 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
149 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 156 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
150 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 157 if (hw->mac.type >= ixgbe_mac_82599EB) {
158 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
159 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
160 } else {
161 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
162 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
163 }
151 } 164 }
152 165 if (hw->mac.type >= ixgbe_mac_82599EB)
166 for (i = 0; i < 8; i++)
167 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
153 IXGBE_READ_REG(hw, IXGBE_PRC64); 168 IXGBE_READ_REG(hw, IXGBE_PRC64);
154 IXGBE_READ_REG(hw, IXGBE_PRC127); 169 IXGBE_READ_REG(hw, IXGBE_PRC127);
155 IXGBE_READ_REG(hw, IXGBE_PRC255); 170 IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -187,9 +202,26 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
187 IXGBE_READ_REG(hw, IXGBE_BPTC); 202 IXGBE_READ_REG(hw, IXGBE_BPTC);
188 for (i = 0; i < 16; i++) { 203 for (i = 0; i < 16; i++) {
189 IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 204 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
190 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
191 IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 205 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
192 IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 206 if (hw->mac.type >= ixgbe_mac_82599EB) {
207 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
208 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
209 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
210 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
211 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
212 } else {
213 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
214 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
215 }
216 }
217
218 if (hw->mac.type == ixgbe_mac_X540) {
219 if (hw->phy.id == 0)
220 hw->phy.ops.identify(hw);
221 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
222 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
223 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
224 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
193 } 225 }
194 226
195 return 0; 227 return 0;
@@ -454,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
454 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 486 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
455 * access and verify no pending requests 487 * access and verify no pending requests
456 */ 488 */
457 if (ixgbe_disable_pcie_master(hw) != 0) 489 ixgbe_disable_pcie_master(hw);
458 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
459 490
460 return 0; 491 return 0;
461} 492}
@@ -603,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
603 ixgbe_shift_out_eeprom_bits(hw, data, 16); 634 ixgbe_shift_out_eeprom_bits(hw, data, 16);
604 ixgbe_standby_eeprom(hw); 635 ixgbe_standby_eeprom(hw);
605 636
606 msleep(hw->eeprom.semaphore_delay);
607 /* Done with writing - release the EEPROM */ 637 /* Done with writing - release the EEPROM */
608 ixgbe_release_eeprom(hw); 638 ixgbe_release_eeprom(hw);
609 } 639 }
@@ -747,10 +777,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
747static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 777static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
748{ 778{
749 s32 status = 0; 779 s32 status = 0;
750 u32 eec = 0; 780 u32 eec;
751 u32 i; 781 u32 i;
752 782
753 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 783 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
754 status = IXGBE_ERR_SWFW_SYNC; 784 status = IXGBE_ERR_SWFW_SYNC;
755 785
756 if (status == 0) { 786 if (status == 0) {
@@ -773,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
773 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 803 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
774 hw_dbg(hw, "Could not acquire EEPROM grant\n"); 804 hw_dbg(hw, "Could not acquire EEPROM grant\n");
775 805
776 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 806 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
777 status = IXGBE_ERR_EEPROM; 807 status = IXGBE_ERR_EEPROM;
778 } 808 }
779 }
780 809
781 /* Setup EEPROM for Read/Write */ 810 /* Setup EEPROM for Read/Write */
782 if (status == 0) { 811 if (status == 0) {
783 /* Clear CS and SK */ 812 /* Clear CS and SK */
784 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 813 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
785 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 814 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
786 IXGBE_WRITE_FLUSH(hw); 815 IXGBE_WRITE_FLUSH(hw);
787 udelay(1); 816 udelay(1);
817 }
788 } 818 }
789 return status; 819 return status;
790} 820}
@@ -798,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
798static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 828static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
799{ 829{
800 s32 status = IXGBE_ERR_EEPROM; 830 s32 status = IXGBE_ERR_EEPROM;
801 u32 timeout; 831 u32 timeout = 2000;
802 u32 i; 832 u32 i;
803 u32 swsm; 833 u32 swsm;
804 834
805 /* Set timeout value based on size of EEPROM */
806 timeout = hw->eeprom.word_size + 1;
807
808 /* Get SMBI software semaphore between device drivers first */ 835 /* Get SMBI software semaphore between device drivers first */
809 for (i = 0; i < timeout; i++) { 836 for (i = 0; i < timeout; i++) {
810 /* 837 /*
@@ -816,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
816 status = 0; 843 status = 0;
817 break; 844 break;
818 } 845 }
819 msleep(1); 846 udelay(50);
820 } 847 }
821 848
822 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 849 /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -844,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
844 * was not granted because we don't have access to the EEPROM 871 * was not granted because we don't have access to the EEPROM
845 */ 872 */
846 if (i >= timeout) { 873 if (i >= timeout) {
847 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 874 hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
848 "not granted.\n"); 875 "not granted.\n");
849 ixgbe_release_eeprom_semaphore(hw); 876 ixgbe_release_eeprom_semaphore(hw);
850 status = IXGBE_ERR_EEPROM; 877 status = IXGBE_ERR_EEPROM;
851 } 878 }
879 } else {
880 hw_dbg(hw, "Software semaphore SMBI between device drivers "
881 "not granted.\n");
852 } 882 }
853 883
854 return status; 884 return status;
@@ -1080,11 +1110,14 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1080 eec &= ~IXGBE_EEC_REQ; 1110 eec &= ~IXGBE_EEC_REQ;
1081 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1111 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1082 1112
1083 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1113 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1114
1115 /* Delay before attempt to obtain semaphore again to allow FW access */
1116 msleep(hw->eeprom.semaphore_delay);
1084} 1117}
1085 1118
1086/** 1119/**
1087 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1120 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1088 * @hw: pointer to hardware structure 1121 * @hw: pointer to hardware structure
1089 **/ 1122 **/
1090u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) 1123u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
@@ -1190,7 +1223,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1190 if (status == 0) { 1223 if (status == 0) {
1191 checksum = hw->eeprom.ops.calc_checksum(hw); 1224 checksum = hw->eeprom.ops.calc_checksum(hw);
1192 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1225 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1193 checksum); 1226 checksum);
1194 } else { 1227 } else {
1195 hw_dbg(hw, "EEPROM read failed\n"); 1228 hw_dbg(hw, "EEPROM read failed\n");
1196 } 1229 }
@@ -1238,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1238 u32 rar_low, rar_high; 1271 u32 rar_low, rar_high;
1239 u32 rar_entries = hw->mac.num_rar_entries; 1272 u32 rar_entries = hw->mac.num_rar_entries;
1240 1273
1274 /* Make sure we are using a valid rar index range */
1275 if (index >= rar_entries) {
1276 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1277 return IXGBE_ERR_INVALID_ARGUMENT;
1278 }
1279
1241 /* setup VMDq pool selection before this RAR gets enabled */ 1280 /* setup VMDq pool selection before this RAR gets enabled */
1242 hw->mac.ops.set_vmdq(hw, index, vmdq); 1281 hw->mac.ops.set_vmdq(hw, index, vmdq);
1243 1282
1244 /* Make sure we are using a valid rar index range */ 1283 /*
1245 if (index < rar_entries) { 1284 * HW expects these in little endian so we reverse the byte
1246 /* 1285 * order from network order (big endian) to little endian
1247 * HW expects these in little endian so we reverse the byte 1286 */
1248 * order from network order (big endian) to little endian 1287 rar_low = ((u32)addr[0] |
1249 */ 1288 ((u32)addr[1] << 8) |
1250 rar_low = ((u32)addr[0] | 1289 ((u32)addr[2] << 16) |
1251 ((u32)addr[1] << 8) | 1290 ((u32)addr[3] << 24));
1252 ((u32)addr[2] << 16) | 1291 /*
1253 ((u32)addr[3] << 24)); 1292 * Some parts put the VMDq setting in the extra RAH bits,
1254 /* 1293 * so save everything except the lower 16 bits that hold part
1255 * Some parts put the VMDq setting in the extra RAH bits, 1294 * of the address and the address valid bit.
1256 * so save everything except the lower 16 bits that hold part 1295 */
1257 * of the address and the address valid bit. 1296 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1258 */ 1297 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1259 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1298 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1260 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1261 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1262 1299
1263 if (enable_addr != 0) 1300 if (enable_addr != 0)
1264 rar_high |= IXGBE_RAH_AV; 1301 rar_high |= IXGBE_RAH_AV;
1265 1302
1266 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1303 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1267 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1304 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1268 } else {
1269 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1270 return IXGBE_ERR_RAR_INDEX;
1271 }
1272 1305
1273 return 0; 1306 return 0;
1274} 1307}
@@ -1286,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1286 u32 rar_entries = hw->mac.num_rar_entries; 1319 u32 rar_entries = hw->mac.num_rar_entries;
1287 1320
1288 /* Make sure we are using a valid rar index range */ 1321 /* Make sure we are using a valid rar index range */
1289 if (index < rar_entries) { 1322 if (index >= rar_entries) {
1290 /*
1291 * Some parts put the VMDq setting in the extra RAH bits,
1292 * so save everything except the lower 16 bits that hold part
1293 * of the address and the address valid bit.
1294 */
1295 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1296 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1297
1298 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1299 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1300 } else {
1301 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1323 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1302 return IXGBE_ERR_RAR_INDEX; 1324 return IXGBE_ERR_INVALID_ARGUMENT;
1303 } 1325 }
1304 1326
1305 /* clear VMDq pool/queue selection for this RAR */ 1327 /*
1306 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1328 * Some parts put the VMDq setting in the extra RAH bits,
1307 1329 * so save everything except the lower 16 bits that hold part
1308 return 0; 1330 * of the address and the address valid bit.
1309} 1331 */
1310
1311/**
1312 * ixgbe_enable_rar - Enable Rx address register
1313 * @hw: pointer to hardware structure
1314 * @index: index into the RAR table
1315 *
1316 * Enables the select receive address register.
1317 **/
1318static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1319{
1320 u32 rar_high;
1321
1322 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1332 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1323 rar_high |= IXGBE_RAH_AV; 1333 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1334
1335 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1324 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1336 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1325}
1326 1337
1327/** 1338 /* clear VMDq pool/queue selection for this RAR */
1328 * ixgbe_disable_rar - Disable Rx address register 1339 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1329 * @hw: pointer to hardware structure
1330 * @index: index into the RAR table
1331 *
1332 * Disables the select receive address register.
1333 **/
1334static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1335{
1336 u32 rar_high;
1337 1340
1338 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1341 return 0;
1339 rar_high &= (~IXGBE_RAH_AV);
1340 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1341} 1342}
1342 1343
1343/** 1344/**
@@ -1386,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1386 } 1387 }
1387 1388
1388 /* Clear the MTA */ 1389 /* Clear the MTA */
1389 hw->addr_ctrl.mc_addr_in_rar_count = 0;
1390 hw->addr_ctrl.mta_in_use = 0; 1390 hw->addr_ctrl.mta_in_use = 0;
1391 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1391 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1392 1392
@@ -1401,105 +1401,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1401} 1401}
1402 1402
1403/** 1403/**
1404 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1405 * @hw: pointer to hardware structure
1406 * @addr: new address
1407 *
1408 * Adds it to unused receive address register or goes into promiscuous mode.
1409 **/
1410static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1411{
1412 u32 rar_entries = hw->mac.num_rar_entries;
1413 u32 rar;
1414
1415 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1416 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1417
1418 /*
1419 * Place this address in the RAR if there is room,
1420 * else put the controller into promiscuous mode
1421 */
1422 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1423 rar = hw->addr_ctrl.rar_used_count -
1424 hw->addr_ctrl.mc_addr_in_rar_count;
1425 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1426 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1427 hw->addr_ctrl.rar_used_count++;
1428 } else {
1429 hw->addr_ctrl.overflow_promisc++;
1430 }
1431
1432 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1433}
1434
1435/**
1436 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1437 * @hw: pointer to hardware structure
1438 * @netdev: pointer to net device structure
1439 *
1440 * The given list replaces any existing list. Clears the secondary addrs from
1441 * receive address registers. Uses unused receive address registers for the
1442 * first secondary addresses, and falls back to promiscuous mode as needed.
1443 *
1444 * Drivers using secondary unicast addresses must set user_set_promisc when
1445 * manually putting the device into promiscuous mode.
1446 **/
1447s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1448 struct net_device *netdev)
1449{
1450 u32 i;
1451 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1452 u32 uc_addr_in_use;
1453 u32 fctrl;
1454 struct netdev_hw_addr *ha;
1455
1456 /*
1457 * Clear accounting of old secondary address list,
1458 * don't count RAR[0]
1459 */
1460 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1461 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1462 hw->addr_ctrl.overflow_promisc = 0;
1463
1464 /* Zero out the other receive addresses */
1465 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1466 for (i = 0; i < uc_addr_in_use; i++) {
1467 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1468 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1469 }
1470
1471 /* Add the new addresses */
1472 netdev_for_each_uc_addr(ha, netdev) {
1473 hw_dbg(hw, " Adding the secondary addresses:\n");
1474 ixgbe_add_uc_addr(hw, ha->addr, 0);
1475 }
1476
1477 if (hw->addr_ctrl.overflow_promisc) {
1478 /* enable promisc if not already in overflow or set by user */
1479 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1480 hw_dbg(hw, " Entering address overflow promisc mode\n");
1481 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1482 fctrl |= IXGBE_FCTRL_UPE;
1483 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1484 hw->addr_ctrl.uc_set_promisc = true;
1485 }
1486 } else {
1487 /* only disable if set by overflow, not by user */
1488 if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
1489 !(hw->addr_ctrl.user_set_promisc)) {
1490 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1491 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1492 fctrl &= ~IXGBE_FCTRL_UPE;
1493 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1494 hw->addr_ctrl.uc_set_promisc = false;
1495 }
1496 }
1497
1498 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
1499 return 0;
1500}
1501
1502/**
1503 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1404 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1504 * @hw: pointer to hardware structure 1405 * @hw: pointer to hardware structure
1505 * @mc_addr: the multicast address 1406 * @mc_addr: the multicast address
@@ -1550,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1550 u32 vector; 1451 u32 vector;
1551 u32 vector_bit; 1452 u32 vector_bit;
1552 u32 vector_reg; 1453 u32 vector_reg;
1553 u32 mta_reg;
1554 1454
1555 hw->addr_ctrl.mta_in_use++; 1455 hw->addr_ctrl.mta_in_use++;
1556 1456
@@ -1568,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1568 */ 1468 */
1569 vector_reg = (vector >> 5) & 0x7F; 1469 vector_reg = (vector >> 5) & 0x7F;
1570 vector_bit = vector & 0x1F; 1470 vector_bit = vector & 0x1F;
1571 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); 1471 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1572 mta_reg |= (1 << vector_bit);
1573 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
1574} 1472}
1575 1473
1576/** 1474/**
@@ -1596,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1596 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 1494 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1597 hw->addr_ctrl.mta_in_use = 0; 1495 hw->addr_ctrl.mta_in_use = 0;
1598 1496
1599 /* Clear the MTA */ 1497 /* Clear mta_shadow */
1600 hw_dbg(hw, " Clearing MTA\n"); 1498 hw_dbg(hw, " Clearing MTA\n");
1601 for (i = 0; i < hw->mac.mcft_size; i++) 1499 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
1602 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1603 1500
1604 /* Add the new addresses */ 1501 /* Update mta shadow */
1605 netdev_for_each_mc_addr(ha, netdev) { 1502 netdev_for_each_mc_addr(ha, netdev) {
1606 hw_dbg(hw, " Adding the multicast addresses:\n"); 1503 hw_dbg(hw, " Adding the multicast addresses:\n");
1607 ixgbe_set_mta(hw, ha->addr); 1504 ixgbe_set_mta(hw, ha->addr);
1608 } 1505 }
1609 1506
1610 /* Enable mta */ 1507 /* Enable mta */
1508 for (i = 0; i < hw->mac.mcft_size; i++)
1509 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1510 hw->mac.mta_shadow[i]);
1511
1611 if (hw->addr_ctrl.mta_in_use > 0) 1512 if (hw->addr_ctrl.mta_in_use > 0)
1612 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1513 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1613 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1514 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1624,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1624 **/ 1525 **/
1625s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1526s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1626{ 1527{
1627 u32 i;
1628 u32 rar_entries = hw->mac.num_rar_entries;
1629 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1528 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1630 1529
1631 if (a->mc_addr_in_rar_count > 0)
1632 for (i = (rar_entries - a->mc_addr_in_rar_count);
1633 i < rar_entries; i++)
1634 ixgbe_enable_rar(hw, i);
1635
1636 if (a->mta_in_use > 0) 1530 if (a->mta_in_use > 0)
1637 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 1531 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1638 hw->mac.mc_filter_type); 1532 hw->mac.mc_filter_type);
@@ -1648,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1648 **/ 1542 **/
1649s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1543s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1650{ 1544{
1651 u32 i;
1652 u32 rar_entries = hw->mac.num_rar_entries;
1653 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1545 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1654 1546
1655 if (a->mc_addr_in_rar_count > 0)
1656 for (i = (rar_entries - a->mc_addr_in_rar_count);
1657 i < rar_entries; i++)
1658 ixgbe_disable_rar(hw, i);
1659
1660 if (a->mta_in_use > 0) 1547 if (a->mta_in_use > 0)
1661 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1548 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1662 1549
@@ -1685,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1685#endif /* CONFIG_DCB */ 1572#endif /* CONFIG_DCB */
1686 /* Negotiate the fc mode to use */ 1573 /* Negotiate the fc mode to use */
1687 ret_val = ixgbe_fc_autoneg(hw); 1574 ret_val = ixgbe_fc_autoneg(hw);
1688 if (ret_val) 1575 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1689 goto out; 1576 goto out;
1690 1577
1691 /* Disable any previous flow control settings */ 1578 /* Disable any previous flow control settings */
@@ -1703,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1703 * 2: Tx flow control is enabled (we can send pause frames but 1590 * 2: Tx flow control is enabled (we can send pause frames but
1704 * we do not support receiving pause frames). 1591 * we do not support receiving pause frames).
1705 * 3: Both Rx and Tx flow control (symmetric) are enabled. 1592 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1593#ifdef CONFIG_DCB
1706 * 4: Priority Flow Control is enabled. 1594 * 4: Priority Flow Control is enabled.
1595#endif
1707 * other: Invalid. 1596 * other: Invalid.
1708 */ 1597 */
1709 switch (hw->fc.current_mode) { 1598 switch (hw->fc.current_mode) {
@@ -1791,12 +1680,13 @@ out:
1791 **/ 1680 **/
1792s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 1681s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1793{ 1682{
1794 s32 ret_val = 0; 1683 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1795 ixgbe_link_speed speed; 1684 ixgbe_link_speed speed;
1796 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1797 u32 links2, anlp1_reg, autoc_reg, links;
1798 bool link_up; 1685 bool link_up;
1799 1686
1687 if (hw->fc.disable_fc_autoneg)
1688 goto out;
1689
1800 /* 1690 /*
1801 * AN should have completed when the cable was plugged in. 1691 * AN should have completed when the cable was plugged in.
1802 * Look for reasons to bail out. Bail out if: 1692 * Look for reasons to bail out. Bail out if:
@@ -1807,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1807 * So use link_up_wait_to_complete=false. 1697 * So use link_up_wait_to_complete=false.
1808 */ 1698 */
1809 hw->mac.ops.check_link(hw, &speed, &link_up, false); 1699 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1810 1700 if (!link_up) {
1811 if (hw->fc.disable_fc_autoneg || (!link_up)) { 1701 ret_val = IXGBE_ERR_FLOW_CONTROL;
1812 hw->fc.fc_was_autonegged = false;
1813 hw->fc.current_mode = hw->fc.requested_mode;
1814 goto out; 1702 goto out;
1815 } 1703 }
1816 1704
1817 /* 1705 switch (hw->phy.media_type) {
1818 * On backplane, bail out if 1706 /* Autoneg flow control on fiber adapters */
1819 * - backplane autoneg was not completed, or if 1707 case ixgbe_media_type_fiber:
1820 * - we are 82599 and link partner is not AN enabled 1708 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
1821 */ 1709 ret_val = ixgbe_fc_autoneg_fiber(hw);
1822 if (hw->phy.media_type == ixgbe_media_type_backplane) { 1710 break;
1823 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1824 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1825 hw->fc.fc_was_autonegged = false;
1826 hw->fc.current_mode = hw->fc.requested_mode;
1827 goto out;
1828 }
1829 1711
1830 if (hw->mac.type == ixgbe_mac_82599EB) { 1712 /* Autoneg flow control on backplane adapters */
1831 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 1713 case ixgbe_media_type_backplane:
1832 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 1714 ret_val = ixgbe_fc_autoneg_backplane(hw);
1833 hw->fc.fc_was_autonegged = false; 1715 break;
1834 hw->fc.current_mode = hw->fc.requested_mode; 1716
1835 goto out; 1717 /* Autoneg flow control on copper adapters */
1836 } 1718 case ixgbe_media_type_copper:
1837 } 1719 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
1720 ret_val = ixgbe_fc_autoneg_copper(hw);
1721 break;
1722
1723 default:
1724 break;
1838 } 1725 }
1839 1726
1727out:
1728 if (ret_val == 0) {
1729 hw->fc.fc_was_autonegged = true;
1730 } else {
1731 hw->fc.fc_was_autonegged = false;
1732 hw->fc.current_mode = hw->fc.requested_mode;
1733 }
1734 return ret_val;
1735}
1736
1737/**
1738 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
1739 * @hw: pointer to hardware structure
1740 *
1741 * Enable flow control according on 1 gig fiber.
1742 **/
1743static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
1744{
1745 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1746 s32 ret_val;
1747
1840 /* 1748 /*
1841 * On multispeed fiber at 1g, bail out if 1749 * On multispeed fiber at 1g, bail out if
1842 * - link is up but AN did not complete, or if 1750 * - link is up but AN did not complete, or if
1843 * - link is up and AN completed but timed out 1751 * - link is up and AN completed but timed out
1844 */ 1752 */
1845 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) { 1753
1846 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 1754 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1847 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 1755 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1848 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 1756 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1849 hw->fc.fc_was_autonegged = false; 1757 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1850 hw->fc.current_mode = hw->fc.requested_mode; 1758 goto out;
1851 goto out;
1852 }
1853 } 1759 }
1854 1760
1761 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1762 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1763
1764 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
1765 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
1766 IXGBE_PCS1GANA_ASM_PAUSE,
1767 IXGBE_PCS1GANA_SYM_PAUSE,
1768 IXGBE_PCS1GANA_ASM_PAUSE);
1769
1770out:
1771 return ret_val;
1772}
1773
1774/**
1775 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
1776 * @hw: pointer to hardware structure
1777 *
1778 * Enable flow control according to IEEE clause 37.
1779 **/
1780static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
1781{
1782 u32 links2, anlp1_reg, autoc_reg, links;
1783 s32 ret_val;
1784
1855 /* 1785 /*
1856 * Bail out on 1786 * On backplane, bail out if
1857 * - copper or CX4 adapters 1787 * - backplane autoneg was not completed, or if
1858 * - fiber adapters running at 10gig 1788 * - we are 82599 and link partner is not AN enabled
1859 */ 1789 */
1860 if ((hw->phy.media_type == ixgbe_media_type_copper) || 1790 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1861 (hw->phy.media_type == ixgbe_media_type_cx4) || 1791 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1862 ((hw->phy.media_type == ixgbe_media_type_fiber) &&
1863 (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
1864 hw->fc.fc_was_autonegged = false; 1792 hw->fc.fc_was_autonegged = false;
1865 hw->fc.current_mode = hw->fc.requested_mode; 1793 hw->fc.current_mode = hw->fc.requested_mode;
1794 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1866 goto out; 1795 goto out;
1867 } 1796 }
1868 1797
1798 if (hw->mac.type == ixgbe_mac_82599EB) {
1799 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1800 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
1801 hw->fc.fc_was_autonegged = false;
1802 hw->fc.current_mode = hw->fc.requested_mode;
1803 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1804 goto out;
1805 }
1806 }
1869 /* 1807 /*
1870 * Read the AN advertisement and LP ability registers and resolve 1808 * Read the 10g AN autoc and LP ability registers and resolve
1871 * local flow control settings accordingly 1809 * local flow control settings accordingly
1872 */ 1810 */
1873 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 1811 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1874 (hw->phy.media_type != ixgbe_media_type_backplane)) { 1812 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1875 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1876 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1877 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1878 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1879 /*
1880 * Now we need to check if the user selected Rx ONLY
1881 * of pause frames. In this case, we had to advertise
1882 * FULL flow control because we could not advertise RX
1883 * ONLY. Hence, we must now check to see if we need to
1884 * turn OFF the TRANSMISSION of PAUSE frames.
1885 */
1886 if (hw->fc.requested_mode == ixgbe_fc_full) {
1887 hw->fc.current_mode = ixgbe_fc_full;
1888 hw_dbg(hw, "Flow Control = FULL.\n");
1889 } else {
1890 hw->fc.current_mode = ixgbe_fc_rx_pause;
1891 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1892 }
1893 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1894 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1895 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1896 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1897 hw->fc.current_mode = ixgbe_fc_tx_pause;
1898 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1899 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1900 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1901 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1902 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1903 hw->fc.current_mode = ixgbe_fc_rx_pause;
1904 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1905 } else {
1906 hw->fc.current_mode = ixgbe_fc_none;
1907 hw_dbg(hw, "Flow Control = NONE.\n");
1908 }
1909 }
1910 1813
1911 if (hw->phy.media_type == ixgbe_media_type_backplane) { 1814 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
1815 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
1816 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
1817
1818out:
1819 return ret_val;
1820}
1821
1822/**
1823 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
1824 * @hw: pointer to hardware structure
1825 *
1826 * Enable flow control according to IEEE clause 37.
1827 **/
1828static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
1829{
1830 u16 technology_ability_reg = 0;
1831 u16 lp_technology_ability_reg = 0;
1832
1833 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
1834 MDIO_MMD_AN,
1835 &technology_ability_reg);
1836 hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
1837 MDIO_MMD_AN,
1838 &lp_technology_ability_reg);
1839
1840 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
1841 (u32)lp_technology_ability_reg,
1842 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
1843 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
1844}
1845
1846/**
1847 * ixgbe_negotiate_fc - Negotiate flow control
1848 * @hw: pointer to hardware structure
1849 * @adv_reg: flow control advertised settings
1850 * @lp_reg: link partner's flow control settings
1851 * @adv_sym: symmetric pause bit in advertisement
1852 * @adv_asm: asymmetric pause bit in advertisement
1853 * @lp_sym: symmetric pause bit in link partner advertisement
1854 * @lp_asm: asymmetric pause bit in link partner advertisement
1855 *
1856 * Find the intersection between advertised settings and link partner's
1857 * advertised settings
1858 **/
1859static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1860 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
1861{
1862 if ((!(adv_reg)) || (!(lp_reg)))
1863 return IXGBE_ERR_FC_NOT_NEGOTIATED;
1864
1865 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
1912 /* 1866 /*
1913 * Read the 10g AN autoc and LP ability registers and resolve 1867 * Now we need to check if the user selected Rx ONLY
1914 * local flow control settings accordingly 1868 * of pause frames. In this case, we had to advertise
1869 * FULL flow control because we could not advertise RX
1870 * ONLY. Hence, we must now check to see if we need to
1871 * turn OFF the TRANSMISSION of PAUSE frames.
1915 */ 1872 */
1916 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1873 if (hw->fc.requested_mode == ixgbe_fc_full) {
1917 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 1874 hw->fc.current_mode = ixgbe_fc_full;
1918 1875 hw_dbg(hw, "Flow Control = FULL.\n");
1919 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1920 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1921 /*
1922 * Now we need to check if the user selected Rx ONLY
1923 * of pause frames. In this case, we had to advertise
1924 * FULL flow control because we could not advertise RX
1925 * ONLY. Hence, we must now check to see if we need to
1926 * turn OFF the TRANSMISSION of PAUSE frames.
1927 */
1928 if (hw->fc.requested_mode == ixgbe_fc_full) {
1929 hw->fc.current_mode = ixgbe_fc_full;
1930 hw_dbg(hw, "Flow Control = FULL.\n");
1931 } else {
1932 hw->fc.current_mode = ixgbe_fc_rx_pause;
1933 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1934 }
1935 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1936 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1937 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1938 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1939 hw->fc.current_mode = ixgbe_fc_tx_pause;
1940 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1941 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1942 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1943 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1944 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1945 hw->fc.current_mode = ixgbe_fc_rx_pause;
1946 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1947 } else { 1876 } else {
1948 hw->fc.current_mode = ixgbe_fc_none; 1877 hw->fc.current_mode = ixgbe_fc_rx_pause;
1949 hw_dbg(hw, "Flow Control = NONE.\n"); 1878 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
1950 } 1879 }
1880 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
1881 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
1882 hw->fc.current_mode = ixgbe_fc_tx_pause;
1883 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1884 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
1885 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
1886 hw->fc.current_mode = ixgbe_fc_rx_pause;
1887 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1888 } else {
1889 hw->fc.current_mode = ixgbe_fc_none;
1890 hw_dbg(hw, "Flow Control = NONE.\n");
1951 } 1891 }
1952 /* Record that current_mode is the result of a successful autoneg */ 1892 return 0;
1953 hw->fc.fc_was_autonegged = true;
1954
1955out:
1956 return ret_val;
1957} 1893}
1958 1894
1959/** 1895/**
@@ -1965,7 +1901,8 @@ out:
1965static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 1901static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1966{ 1902{
1967 s32 ret_val = 0; 1903 s32 ret_val = 0;
1968 u32 reg; 1904 u32 reg = 0, reg_bp = 0;
1905 u16 reg_cu = 0;
1969 1906
1970#ifdef CONFIG_DCB 1907#ifdef CONFIG_DCB
1971 if (hw->fc.requested_mode == ixgbe_fc_pfc) { 1908 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1973,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1973 goto out; 1910 goto out;
1974 } 1911 }
1975 1912
1976#endif 1913#endif /* CONFIG_DCB */
1977 /* Validate the packetbuf configuration */ 1914 /* Validate the packetbuf configuration */
1978 if (packetbuf_num < 0 || packetbuf_num > 7) { 1915 if (packetbuf_num < 0 || packetbuf_num > 7) {
1979 hw_dbg(hw, "Invalid packet buffer number [%d], expected range " 1916 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -2011,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2011 hw->fc.requested_mode = ixgbe_fc_full; 1948 hw->fc.requested_mode = ixgbe_fc_full;
2012 1949
2013 /* 1950 /*
2014 * Set up the 1G flow control advertisement registers so the HW will be 1951 * Set up the 1G and 10G flow control advertisement registers so the
2015 * able to do fc autoneg once the cable is plugged in. If we end up 1952 * HW will be able to do fc autoneg once the cable is plugged in. If
2016 * using 10g instead, this is harmless. 1953 * we link at 10G, the 1G advertisement is harmless and vice versa.
2017 */ 1954 */
2018 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1955
1956 switch (hw->phy.media_type) {
1957 case ixgbe_media_type_fiber:
1958 case ixgbe_media_type_backplane:
1959 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1960 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1961 break;
1962
1963 case ixgbe_media_type_copper:
1964 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
1965 MDIO_MMD_AN, &reg_cu);
1966 break;
1967
1968 default:
1969 ;
1970 }
2019 1971
2020 /* 1972 /*
2021 * The possible values of fc.requested_mode are: 1973 * The possible values of fc.requested_mode are:
@@ -2034,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2034 case ixgbe_fc_none: 1986 case ixgbe_fc_none:
2035 /* Flow control completely disabled by software override. */ 1987 /* Flow control completely disabled by software override. */
2036 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 1988 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1989 if (hw->phy.media_type == ixgbe_media_type_backplane)
1990 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
1991 IXGBE_AUTOC_ASM_PAUSE);
1992 else if (hw->phy.media_type == ixgbe_media_type_copper)
1993 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2037 break; 1994 break;
2038 case ixgbe_fc_rx_pause: 1995 case ixgbe_fc_rx_pause:
2039 /* 1996 /*
@@ -2045,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2045 * disable the adapter's ability to send PAUSE frames. 2002 * disable the adapter's ability to send PAUSE frames.
2046 */ 2003 */
2047 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2004 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2005 if (hw->phy.media_type == ixgbe_media_type_backplane)
2006 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2007 IXGBE_AUTOC_ASM_PAUSE);
2008 else if (hw->phy.media_type == ixgbe_media_type_copper)
2009 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2048 break; 2010 break;
2049 case ixgbe_fc_tx_pause: 2011 case ixgbe_fc_tx_pause:
2050 /* 2012 /*
@@ -2053,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2053 */ 2015 */
2054 reg |= (IXGBE_PCS1GANA_ASM_PAUSE); 2016 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2055 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); 2017 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2018 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2019 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2020 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2021 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2022 reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2023 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2024 }
2056 break; 2025 break;
2057 case ixgbe_fc_full: 2026 case ixgbe_fc_full:
2058 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2027 /* Flow control (both Rx and Tx) is enabled by SW override. */
2059 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2028 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2029 if (hw->phy.media_type == ixgbe_media_type_backplane)
2030 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2031 IXGBE_AUTOC_ASM_PAUSE);
2032 else if (hw->phy.media_type == ixgbe_media_type_copper)
2033 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2060 break; 2034 break;
2061#ifdef CONFIG_DCB 2035#ifdef CONFIG_DCB
2062 case ixgbe_fc_pfc: 2036 case ixgbe_fc_pfc:
@@ -2070,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2070 break; 2044 break;
2071 } 2045 }
2072 2046
2073 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 2047 if (hw->mac.type != ixgbe_mac_X540) {
2074 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 2048 /*
2075 2049 * Enable auto-negotiation between the MAC & PHY;
2076 /* Disable AN timeout */ 2050 * the MAC will advertise clause 37 flow control.
2077 if (hw->fc.strict_ieee) 2051 */
2078 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 2052 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2053 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2079 2054
2080 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 2055 /* Disable AN timeout */
2081 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 2056 if (hw->fc.strict_ieee)
2057 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2082 2058
2083 /* 2059 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2084 * Set up the 10G flow control advertisement registers so the HW 2060 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2085 * can do fc autoneg once the cable is plugged in. If we end up 2061 }
2086 * using 1g instead, this is harmless.
2087 */
2088 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2089 2062
2090 /* 2063 /*
2091 * The possible values of fc.requested_mode are: 2064 * AUTOC restart handles negotiation of 1G and 10G on backplane
2092 * 0: Flow control is completely disabled 2065 * and copper. There is no need to set the PCS1GCTL register.
2093 * 1: Rx flow control is enabled (we can receive pause frames, 2066 *
2094 * but not send pause frames).
2095 * 2: Tx flow control is enabled (we can send pause frames but
2096 * we do not support receiving pause frames).
2097 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2098 * other: Invalid.
2099 */ 2067 */
2100 switch (hw->fc.requested_mode) { 2068 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2101 case ixgbe_fc_none: 2069 reg_bp |= IXGBE_AUTOC_AN_RESTART;
2102 /* Flow control completely disabled by software override. */ 2070 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2103 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); 2071 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2104 break; 2072 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
2105 case ixgbe_fc_rx_pause: 2073 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
2106 /* 2074 MDIO_MMD_AN, reg_cu);
2107 * Rx Flow control is enabled and Tx Flow control is
2108 * disabled by software override. Since there really
2109 * isn't a way to advertise that we are capable of RX
2110 * Pause ONLY, we will advertise that we support both
2111 * symmetric and asymmetric Rx PAUSE. Later, we will
2112 * disable the adapter's ability to send PAUSE frames.
2113 */
2114 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2115 break;
2116 case ixgbe_fc_tx_pause:
2117 /*
2118 * Tx Flow control is enabled, and Rx Flow control is
2119 * disabled by software override.
2120 */
2121 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2122 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2123 break;
2124 case ixgbe_fc_full:
2125 /* Flow control (both Rx and Tx) is enabled by SW override. */
2126 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2127 break;
2128#ifdef CONFIG_DCB
2129 case ixgbe_fc_pfc:
2130 goto out;
2131 break;
2132#endif /* CONFIG_DCB */
2133 default:
2134 hw_dbg(hw, "Flow control param set incorrectly\n");
2135 ret_val = IXGBE_ERR_CONFIG;
2136 goto out;
2137 break;
2138 } 2075 }
2139 /*
2140 * AUTOC restart handles negotiation of 1G and 10G. There is
2141 * no need to set the PCS1GCTL register.
2142 */
2143 reg |= IXGBE_AUTOC_AN_RESTART;
2144 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2145 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2146 2076
2077 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2147out: 2078out:
2148 return ret_val; 2079 return ret_val;
2149} 2080}
@@ -2159,10 +2090,16 @@ out:
2159 **/ 2090 **/
2160s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2091s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2161{ 2092{
2093 struct ixgbe_adapter *adapter = hw->back;
2162 u32 i; 2094 u32 i;
2163 u32 reg_val; 2095 u32 reg_val;
2164 u32 number_of_queues; 2096 u32 number_of_queues;
2165 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2097 s32 status = 0;
2098 u16 dev_status = 0;
2099
2100 /* Just jump out if bus mastering is already disabled */
2101 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2102 goto out;
2166 2103
2167 /* Disable the receive unit by stopping each queue */ 2104 /* Disable the receive unit by stopping each queue */
2168 number_of_queues = hw->mac.max_rx_queues; 2105 number_of_queues = hw->mac.max_rx_queues;
@@ -2179,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2179 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2116 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2180 2117
2181 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2118 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2182 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { 2119 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2183 status = 0; 2120 goto check_device_status;
2121 udelay(100);
2122 }
2123
2124 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
2125 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2126
2127 /*
2128 * Before proceeding, make sure that the PCIe block does not have
2129 * transactions pending.
2130 */
2131check_device_status:
2132 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2133 pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
2134 &dev_status);
2135 if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2184 break; 2136 break;
2185 }
2186 udelay(100); 2137 udelay(100);
2187 } 2138 }
2188 2139
2140 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2141 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2142 else
2143 goto out;
2144
2145 /*
2146 * Two consecutive resets are required via CTRL.RST per datasheet
2147 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2148 * of this need. The first reset prevents new master requests from
2149 * being issued by our device. We then must wait 1usec for any
2150 * remaining completions from the PCIe bus to trickle in, and then reset
2151 * again to clear out any effects they may have had on our device.
2152 */
2153 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2154
2155out:
2189 return status; 2156 return status;
2190} 2157}
2191 2158
@@ -2195,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2195 * @hw: pointer to hardware structure 2162 * @hw: pointer to hardware structure
2196 * @mask: Mask to specify which semaphore to acquire 2163 * @mask: Mask to specify which semaphore to acquire
2197 * 2164 *
2198 * Acquires the SWFW semaphore thought the GSSR register for the specified 2165 * Acquires the SWFW semaphore through the GSSR register for the specified
2199 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2166 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2200 **/ 2167 **/
2201s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2168s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2206,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2206 s32 timeout = 200; 2173 s32 timeout = 200;
2207 2174
2208 while (timeout) { 2175 while (timeout) {
2176 /*
2177 * SW EEPROM semaphore bit is used for access to all
2178 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2179 */
2209 if (ixgbe_get_eeprom_semaphore(hw)) 2180 if (ixgbe_get_eeprom_semaphore(hw))
2210 return IXGBE_ERR_SWFW_SYNC; 2181 return IXGBE_ERR_SWFW_SYNC;
2211 2182
@@ -2223,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2223 } 2194 }
2224 2195
2225 if (!timeout) { 2196 if (!timeout) {
2226 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2197 hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
2227 return IXGBE_ERR_SWFW_SYNC; 2198 return IXGBE_ERR_SWFW_SYNC;
2228 } 2199 }
2229 2200
@@ -2239,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2239 * @hw: pointer to hardware structure 2210 * @hw: pointer to hardware structure
2240 * @mask: Mask to specify which semaphore to release 2211 * @mask: Mask to specify which semaphore to release
2241 * 2212 *
2242 * Releases the SWFW semaphore thought the GSSR register for the specified 2213 * Releases the SWFW semaphore through the GSSR register for the specified
2243 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2214 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2244 **/ 2215 **/
2245void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2216void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2427,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2427 u32 mpsar_lo, mpsar_hi; 2398 u32 mpsar_lo, mpsar_hi;
2428 u32 rar_entries = hw->mac.num_rar_entries; 2399 u32 rar_entries = hw->mac.num_rar_entries;
2429 2400
2430 if (rar < rar_entries) { 2401 /* Make sure we are using a valid rar index range */
2431 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2402 if (rar >= rar_entries) {
2432 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2403 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2404 return IXGBE_ERR_INVALID_ARGUMENT;
2405 }
2433 2406
2434 if (!mpsar_lo && !mpsar_hi) 2407 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2435 goto done; 2408 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2436 2409
2437 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2410 if (!mpsar_lo && !mpsar_hi)
2438 if (mpsar_lo) { 2411 goto done;
2439 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2440 mpsar_lo = 0;
2441 }
2442 if (mpsar_hi) {
2443 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2444 mpsar_hi = 0;
2445 }
2446 } else if (vmdq < 32) {
2447 mpsar_lo &= ~(1 << vmdq);
2448 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2449 } else {
2450 mpsar_hi &= ~(1 << (vmdq - 32));
2451 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2452 }
2453 2412
2454 /* was that the last pool using this rar? */ 2413 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2455 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2414 if (mpsar_lo) {
2456 hw->mac.ops.clear_rar(hw, rar); 2415 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2416 mpsar_lo = 0;
2417 }
2418 if (mpsar_hi) {
2419 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2420 mpsar_hi = 0;
2421 }
2422 } else if (vmdq < 32) {
2423 mpsar_lo &= ~(1 << vmdq);
2424 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2457 } else { 2425 } else {
2458 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2426 mpsar_hi &= ~(1 << (vmdq - 32));
2427 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2459 } 2428 }
2460 2429
2430 /* was that the last pool using this rar? */
2431 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2432 hw->mac.ops.clear_rar(hw, rar);
2461done: 2433done:
2462 return 0; 2434 return 0;
2463} 2435}
@@ -2473,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2473 u32 mpsar; 2445 u32 mpsar;
2474 u32 rar_entries = hw->mac.num_rar_entries; 2446 u32 rar_entries = hw->mac.num_rar_entries;
2475 2447
2476 if (rar < rar_entries) { 2448 /* Make sure we are using a valid rar index range */
2477 if (vmdq < 32) { 2449 if (rar >= rar_entries) {
2478 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2479 mpsar |= 1 << vmdq;
2480 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2481 } else {
2482 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2483 mpsar |= 1 << (vmdq - 32);
2484 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2485 }
2486 } else {
2487 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2450 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2451 return IXGBE_ERR_INVALID_ARGUMENT;
2452 }
2453
2454 if (vmdq < 32) {
2455 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2456 mpsar |= 1 << vmdq;
2457 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2458 } else {
2459 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2460 mpsar |= 1 << (vmdq - 32);
2461 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2488 } 2462 }
2489 return 0; 2463 return 0;
2490} 2464}
@@ -2497,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2497{ 2471{
2498 int i; 2472 int i;
2499 2473
2500
2501 for (i = 0; i < 128; i++) 2474 for (i = 0; i < 128; i++)
2502 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 2475 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2503 2476
@@ -2726,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2726 * Reads the links register to determine if link is up and the current speed 2699 * Reads the links register to determine if link is up and the current speed
2727 **/ 2700 **/
2728s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 2701s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2729 bool *link_up, bool link_up_wait_to_complete) 2702 bool *link_up, bool link_up_wait_to_complete)
2730{ 2703{
2731 u32 links_reg; 2704 u32 links_reg, links_orig;
2732 u32 i; 2705 u32 i;
2733 2706
2707 /* clear the old state */
2708 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
2709
2734 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 2710 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2711
2712 if (links_orig != links_reg) {
2713 hw_dbg(hw, "LINKS changed from %08X to %08X\n",
2714 links_orig, links_reg);
2715 }
2716
2735 if (link_up_wait_to_complete) { 2717 if (link_up_wait_to_complete) {
2736 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 2718 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2737 if (links_reg & IXGBE_LINKS_UP) { 2719 if (links_reg & IXGBE_LINKS_UP) {
@@ -2754,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2754 IXGBE_LINKS_SPEED_10G_82599) 2736 IXGBE_LINKS_SPEED_10G_82599)
2755 *speed = IXGBE_LINK_SPEED_10GB_FULL; 2737 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2756 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 2738 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2757 IXGBE_LINKS_SPEED_1G_82599) 2739 IXGBE_LINKS_SPEED_1G_82599)
2758 *speed = IXGBE_LINK_SPEED_1GB_FULL; 2740 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2759 else 2741 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2742 IXGBE_LINKS_SPEED_100_82599)
2760 *speed = IXGBE_LINK_SPEED_100_FULL; 2743 *speed = IXGBE_LINK_SPEED_100_FULL;
2744 else
2745 *speed = IXGBE_LINK_SPEED_UNKNOWN;
2761 2746
2762 /* if link is down, zero out the current_mode */ 2747 /* if link is down, zero out the current_mode */
2763 if (*link_up == false) { 2748 if (*link_up == false) {
@@ -2814,6 +2799,28 @@ wwn_prefix_out:
2814} 2799}
2815 2800
2816/** 2801/**
2802 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
2803 * control
2804 * @hw: pointer to hardware structure
2805 *
2806 * There are several phys that do not support autoneg flow control. This
2807 * function check the device id to see if the associated phy supports
2808 * autoneg flow control.
2809 **/
2810static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
2811{
2812
2813 switch (hw->device_id) {
2814 case IXGBE_DEV_ID_X540T:
2815 return 0;
2816 case IXGBE_DEV_ID_82599_T3_LOM:
2817 return 0;
2818 default:
2819 return IXGBE_ERR_FC_NOT_SUPPORTED;
2820 }
2821}
2822
2823/**
2817 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 2824 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
2818 * @hw: pointer to hardware structure 2825 * @hw: pointer to hardware structure
2819 * @enable: enable or disable switch for anti-spoofing 2826 * @enable: enable or disable switch for anti-spoofing
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 90cceb4a6317..508f635fc2ca 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -63,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
63s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 63s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
64s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 64s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
65 struct net_device *netdev); 65 struct net_device *netdev);
66s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
67 struct net_device *netdev);
68s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 66s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
69s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 67s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
70s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 68s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 13c962efbfc9..c2ee6fcb4e91 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index e5935114815e..515bc27477f6 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 2965edcdac7b..c97cf9160dc0 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index 0d2a758effce..1e9750c2b46b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index b0d97a98c84d..beaa1c1c1e67 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 5b0ca85614d1..0b39ab4ffc70 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index a977df3fe81b..d7f0024014b1 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 309272f8f103..76380a2b35aa 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -161,29 +161,25 @@ static int ixgbe_get_settings(struct net_device *netdev,
161 } 161 }
162 162
163 ecmd->advertising = ADVERTISED_Autoneg; 163 ecmd->advertising = ADVERTISED_Autoneg;
164 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) 164 if (hw->phy.autoneg_advertised) {
165 ecmd->advertising |= ADVERTISED_100baseT_Full; 165 if (hw->phy.autoneg_advertised &
166 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 166 IXGBE_LINK_SPEED_100_FULL)
167 ecmd->advertising |= ADVERTISED_10000baseT_Full; 167 ecmd->advertising |= ADVERTISED_100baseT_Full;
168 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 168 if (hw->phy.autoneg_advertised &
169 ecmd->advertising |= ADVERTISED_1000baseT_Full; 169 IXGBE_LINK_SPEED_10GB_FULL)
170 /* 170 ecmd->advertising |= ADVERTISED_10000baseT_Full;
171 * It's possible that phy.autoneg_advertised may not be 171 if (hw->phy.autoneg_advertised &
172 * set yet. If so display what the default would be - 172 IXGBE_LINK_SPEED_1GB_FULL)
173 * both 1G and 10G supported. 173 ecmd->advertising |= ADVERTISED_1000baseT_Full;
174 */ 174 } else {
175 if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full | 175 /*
176 ADVERTISED_10000baseT_Full))) 176 * Default advertised modes in case
177 * phy.autoneg_advertised isn't set.
178 */
177 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 179 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
178 ADVERTISED_1000baseT_Full); 180 ADVERTISED_1000baseT_Full);
179 181 if (hw->mac.type == ixgbe_mac_X540)
180 switch (hw->mac.type) { 182 ecmd->advertising |= ADVERTISED_100baseT_Full;
181 case ixgbe_mac_X540:
182 if (!(ecmd->advertising & ADVERTISED_100baseT_Full))
183 ecmd->advertising |= (ADVERTISED_100baseT_Full);
184 break;
185 default:
186 break;
187 } 183 }
188 184
189 if (hw->phy.media_type == ixgbe_media_type_copper) { 185 if (hw->phy.media_type == ixgbe_media_type_copper) {
@@ -336,6 +332,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
336 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 332 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
337 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 333 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
338 334
335 if (ecmd->advertising & ADVERTISED_100baseT_Full)
336 advertised |= IXGBE_LINK_SPEED_100_FULL;
337
339 if (old == advertised) 338 if (old == advertised)
340 return err; 339 return err;
341 /* this sets the link speed and restarts auto-neg */ 340 /* this sets the link speed and restarts auto-neg */
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index c54a88274d51..00af15a9cdc6 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -135,22 +135,19 @@ out_ddp_put:
135 return len; 135 return len;
136} 136}
137 137
138
138/** 139/**
139 * ixgbe_fcoe_ddp_get - called to set up ddp context 140 * ixgbe_fcoe_ddp_setup - called to set up ddp context
140 * @netdev: the corresponding net_device 141 * @netdev: the corresponding net_device
141 * @xid: the exchange id requesting ddp 142 * @xid: the exchange id requesting ddp
142 * @sgl: the scatter-gather list for this request 143 * @sgl: the scatter-gather list for this request
143 * @sgc: the number of scatter-gather items 144 * @sgc: the number of scatter-gather items
144 * 145 *
145 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
146 * and is expected to be called from ULD, e.g., FCP layer of libfc
147 * to set up ddp for the corresponding xid of the given sglist for
148 * the corresponding I/O.
149 *
150 * Returns : 1 for success and 0 for no ddp 146 * Returns : 1 for success and 0 for no ddp
151 */ 147 */
152int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 148static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
153 struct scatterlist *sgl, unsigned int sgc) 149 struct scatterlist *sgl, unsigned int sgc,
150 int target_mode)
154{ 151{
155 struct ixgbe_adapter *adapter; 152 struct ixgbe_adapter *adapter;
156 struct ixgbe_hw *hw; 153 struct ixgbe_hw *hw;
@@ -164,7 +161,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
164 unsigned int lastsize; 161 unsigned int lastsize;
165 unsigned int thisoff = 0; 162 unsigned int thisoff = 0;
166 unsigned int thislen = 0; 163 unsigned int thislen = 0;
167 u32 fcbuff, fcdmarw, fcfltrw; 164 u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
168 dma_addr_t addr = 0; 165 dma_addr_t addr = 0;
169 166
170 if (!netdev || !sgl) 167 if (!netdev || !sgl)
@@ -275,6 +272,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
275 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 272 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
276 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 273 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
277 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 274 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
275 /* Set WRCONTX bit to allow DDP for target */
276 if (target_mode)
277 fcbuff |= (IXGBE_FCBUFF_WRCONTX);
278 fcbuff |= (IXGBE_FCBUFF_VALID); 278 fcbuff |= (IXGBE_FCBUFF_VALID);
279 279
280 fcdmarw = xid; 280 fcdmarw = xid;
@@ -287,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
287 /* program DMA context */ 287 /* program DMA context */
288 hw = &adapter->hw; 288 hw = &adapter->hw;
289 spin_lock_bh(&fcoe->lock); 289 spin_lock_bh(&fcoe->lock);
290
291 /* turn on last frame indication for target mode as FCP_RSPtarget is
292 * supposed to send FCP_RSP when it is done. */
293 if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
294 set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
295 fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
296 fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
297 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
298 }
299
290 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); 300 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
291 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 301 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
292 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 302 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -295,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
295 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); 305 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
296 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); 306 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
297 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); 307 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
308
298 spin_unlock_bh(&fcoe->lock); 309 spin_unlock_bh(&fcoe->lock);
299 310
300 return 1; 311 return 1;
@@ -309,6 +320,47 @@ out_noddp_unmap:
309} 320}
310 321
311/** 322/**
323 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
324 * @netdev: the corresponding net_device
325 * @xid: the exchange id requesting ddp
326 * @sgl: the scatter-gather list for this request
327 * @sgc: the number of scatter-gather items
328 *
329 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
330 * and is expected to be called from ULD, e.g., FCP layer of libfc
331 * to set up ddp for the corresponding xid of the given sglist for
332 * the corresponding I/O.
333 *
334 * Returns : 1 for success and 0 for no ddp
335 */
336int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
337 struct scatterlist *sgl, unsigned int sgc)
338{
339 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
340}
341
342/**
343 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
344 * @netdev: the corresponding net_device
345 * @xid: the exchange id requesting ddp
346 * @sgl: the scatter-gather list for this request
347 * @sgc: the number of scatter-gather items
348 *
349 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
350 * and is expected to be called from ULD, e.g., FCP layer of libfc
351 * to set up ddp for the corresponding xid of the given sglist for
352 * the corresponding I/O. The DDP in target mode is a write I/O request
353 * from the initiator.
354 *
355 * Returns : 1 for success and 0 for no ddp
356 */
357int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
358 struct scatterlist *sgl, unsigned int sgc)
359{
360 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
361}
362
363/**
312 * ixgbe_fcoe_ddp - check ddp status and mark it done 364 * ixgbe_fcoe_ddp - check ddp status and mark it done
313 * @adapter: ixgbe adapter 365 * @adapter: ixgbe adapter
314 * @rx_desc: advanced rx descriptor 366 * @rx_desc: advanced rx descriptor
@@ -331,6 +383,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
331 struct ixgbe_fcoe *fcoe; 383 struct ixgbe_fcoe *fcoe;
332 struct ixgbe_fcoe_ddp *ddp; 384 struct ixgbe_fcoe_ddp *ddp;
333 struct fc_frame_header *fh; 385 struct fc_frame_header *fh;
386 struct fcoe_crc_eof *crc;
334 387
335 if (!ixgbe_rx_is_fcoe(rx_desc)) 388 if (!ixgbe_rx_is_fcoe(rx_desc))
336 goto ddp_out; 389 goto ddp_out;
@@ -384,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
384 else if (ddp->len) 437 else if (ddp->len)
385 rc = ddp->len; 438 rc = ddp->len;
386 } 439 }
387 440 /* In target mode, check the last data frame of the sequence.
441 * For DDP in target mode, data is already DDPed but the header
442 * indication of the last data frame ould allow is to tell if we
443 * got all the data and the ULP can send FCP_RSP back, as this is
444 * not a full fcoe frame, we fill the trailer here so it won't be
445 * dropped by the ULP stack.
446 */
447 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
448 (fctl & FC_FC_END_SEQ)) {
449 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
450 crc->fcoe_eof = FC_EOF_T;
451 }
388ddp_out: 452ddp_out:
389 return rc; 453 return rc;
390} 454}
@@ -840,5 +904,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
840 } 904 }
841 return rc; 905 return rc;
842} 906}
843
844
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 65cc8fb14fe7..5a650a4ace66 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,9 @@
52/* fcerr */ 52/* fcerr */
53#define IXGBE_FCERR_BADCRC 0x00100000 53#define IXGBE_FCERR_BADCRC 0x00100000
54 54
55/* FCoE DDP for target mode */
56#define __IXGBE_FCOE_TARGET 1
57
55struct ixgbe_fcoe_ddp { 58struct ixgbe_fcoe_ddp {
56 int len; 59 int len;
57 u32 err; 60 u32 err;
@@ -66,6 +69,7 @@ struct ixgbe_fcoe {
66 u8 tc; 69 u8 tc;
67 u8 up; 70 u8 up;
68#endif 71#endif
72 unsigned long mode;
69 atomic_t refcnt; 73 atomic_t refcnt;
70 spinlock_t lock; 74 spinlock_t lock;
71 struct pci_pool *pool; 75 struct pci_pool *pool;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f0d0c5aad2b4..5998dc94dd5c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -54,7 +54,8 @@ static const char ixgbe_driver_string[] =
54 54
55#define DRV_VERSION "3.2.9-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static const char ixgbe_copyright[] =
58 "Copyright (c) 1999-2011 Intel Corporation.";
58 59
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 60static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 61 [board_82598] = &ixgbe_82598_info,
@@ -2597,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2597 2598
2598 i--; 2599 i--;
2599 for (; i >= 0; i--) { 2600 for (; i >= 0; i--) {
2601 /* free only the irqs that were actually requested */
2602 if (!adapter->q_vector[i]->rxr_count &&
2603 !adapter->q_vector[i]->txr_count)
2604 continue;
2605
2600 free_irq(adapter->msix_entries[i].vector, 2606 free_irq(adapter->msix_entries[i].vector,
2601 adapter->q_vector[i]); 2607 adapter->q_vector[i]);
2602 } 2608 }
@@ -3769,7 +3775,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3769 if (ret) 3775 if (ret)
3770 goto link_cfg_out; 3776 goto link_cfg_out;
3771 3777
3772 if (hw->mac.ops.get_link_capabilities) 3778 autoneg = hw->phy.autoneg_advertised;
3779 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3773 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, 3780 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3774 &negotiation); 3781 &negotiation);
3775 if (ret) 3782 if (ret)
@@ -3884,7 +3891,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3884 * If we're not hot-pluggable SFP+, we just need to configure link 3891 * If we're not hot-pluggable SFP+, we just need to configure link
3885 * and bring it up. 3892 * and bring it up.
3886 */ 3893 */
3887 if (hw->phy.type == ixgbe_phy_unknown) 3894 if (hw->phy.type == ixgbe_phy_none)
3888 schedule_work(&adapter->sfp_config_module_task); 3895 schedule_work(&adapter->sfp_config_module_task);
3889 3896
3890 /* enable transmits */ 3897 /* enable transmits */
@@ -7013,6 +7020,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7013#endif 7020#endif
7014#ifdef IXGBE_FCOE 7021#ifdef IXGBE_FCOE
7015 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 7022 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
7023 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
7016 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 7024 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
7017 .ndo_fcoe_enable = ixgbe_fcoe_enable, 7025 .ndo_fcoe_enable = ixgbe_fcoe_enable,
7018 .ndo_fcoe_disable = ixgbe_fcoe_disable, 7026 .ndo_fcoe_disable = ixgbe_fcoe_disable,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index f215c4c296c4..3cf8aec50fcd 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -448,23 +448,20 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
448{ 448{
449 struct ixgbe_mbx_info *mbx = &hw->mbx; 449 struct ixgbe_mbx_info *mbx = &hw->mbx;
450 450
451 switch (hw->mac.type) { 451 if (hw->mac.type != ixgbe_mac_82599EB &&
452 case ixgbe_mac_82599EB: 452 hw->mac.type != ixgbe_mac_X540)
453 case ixgbe_mac_X540: 453 return;
454 mbx->timeout = 0;
455 mbx->usec_delay = 0;
456 454
457 mbx->size = IXGBE_VFMAILBOX_SIZE; 455 mbx->timeout = 0;
456 mbx->udelay = 0;
458 457
459 mbx->stats.msgs_tx = 0; 458 mbx->stats.msgs_tx = 0;
460 mbx->stats.msgs_rx = 0; 459 mbx->stats.msgs_rx = 0;
461 mbx->stats.reqs = 0; 460 mbx->stats.reqs = 0;
462 mbx->stats.acks = 0; 461 mbx->stats.acks = 0;
463 mbx->stats.rsts = 0; 462 mbx->stats.rsts = 0;
464 break; 463
465 default: 464 mbx->size = IXGBE_VFMAILBOX_SIZE;
466 break;
467 }
468} 465}
469#endif /* CONFIG_PCI_IOV */ 466#endif /* CONFIG_PCI_IOV */
470 467
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index ada0ce32a7a6..fe6ea81dc7f8 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8f7123e8fc0a..9190a8fca427 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
57{ 57{
58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
59 u32 phy_addr; 59 u32 phy_addr;
60 u16 ext_ability = 0;
60 61
61 if (hw->phy.type == ixgbe_phy_unknown) { 62 if (hw->phy.type == ixgbe_phy_unknown) {
62 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 63 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
65 ixgbe_get_phy_id(hw); 66 ixgbe_get_phy_id(hw);
66 hw->phy.type = 67 hw->phy.type =
67 ixgbe_get_phy_type_from_id(hw->phy.id); 68 ixgbe_get_phy_type_from_id(hw->phy.id);
69
70 if (hw->phy.type == ixgbe_phy_unknown) {
71 hw->phy.ops.read_reg(hw,
72 MDIO_PMA_EXTABLE,
73 MDIO_MMD_PMAPMD,
74 &ext_ability);
75 if (ext_ability &
76 (MDIO_PMA_EXTABLE_10GBT |
77 MDIO_PMA_EXTABLE_1000BT))
78 hw->phy.type =
79 ixgbe_phy_cu_unknown;
80 else
81 hw->phy.type =
82 ixgbe_phy_generic;
83 }
84
68 status = 0; 85 status = 0;
69 break; 86 break;
70 } 87 }
71 } 88 }
72 /* clear value if nothing found */ 89 /* clear value if nothing found */
73 hw->phy.mdio.prtad = 0; 90 if (status != 0)
91 hw->phy.mdio.prtad = 0;
74 } else { 92 } else {
75 status = 0; 93 status = 0;
76 } 94 }
@@ -138,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
138 **/ 156 **/
139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 157s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
140{ 158{
159 u32 i;
160 u16 ctrl = 0;
161 s32 status = 0;
162
163 if (hw->phy.type == ixgbe_phy_unknown)
164 status = ixgbe_identify_phy_generic(hw);
165
166 if (status != 0 || hw->phy.type == ixgbe_phy_none)
167 goto out;
168
141 /* Don't reset PHY if it's shut down due to overtemp. */ 169 /* Don't reset PHY if it's shut down due to overtemp. */
142 if (!hw->phy.reset_if_overtemp && 170 if (!hw->phy.reset_if_overtemp &&
143 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 171 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
144 return 0; 172 goto out;
145 173
146 /* 174 /*
147 * Perform soft PHY reset to the PHY_XS. 175 * Perform soft PHY reset to the PHY_XS.
148 * This will cause a soft reset to the PHY 176 * This will cause a soft reset to the PHY
149 */ 177 */
150 return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 178 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
151 MDIO_CTRL1_RESET); 179 MDIO_MMD_PHYXS,
180 MDIO_CTRL1_RESET);
181
182 /*
183 * Poll for reset bit to self-clear indicating reset is complete.
184 * Some PHYs could take up to 3 seconds to complete and need about
185 * 1.7 usec delay after the reset is complete.
186 */
187 for (i = 0; i < 30; i++) {
188 msleep(100);
189 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
190 MDIO_MMD_PHYXS, &ctrl);
191 if (!(ctrl & MDIO_CTRL1_RESET)) {
192 udelay(2);
193 break;
194 }
195 }
196
197 if (ctrl & MDIO_CTRL1_RESET) {
198 status = IXGBE_ERR_RESET_FAILED;
199 hw_dbg(hw, "PHY reset polling failed to complete.\n");
200 }
201
202out:
203 return status;
152} 204}
153 205
154/** 206/**
@@ -171,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
171 else 223 else
172 gssr = IXGBE_GSSR_PHY0_SM; 224 gssr = IXGBE_GSSR_PHY0_SM;
173 225
174 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 226 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
175 status = IXGBE_ERR_SWFW_SYNC; 227 status = IXGBE_ERR_SWFW_SYNC;
176 228
177 if (status == 0) { 229 if (status == 0) {
@@ -243,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
243 } 295 }
244 } 296 }
245 297
246 ixgbe_release_swfw_sync(hw, gssr); 298 hw->mac.ops.release_swfw_sync(hw, gssr);
247 } 299 }
248 300
249 return status; 301 return status;
@@ -269,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
269 else 321 else
270 gssr = IXGBE_GSSR_PHY0_SM; 322 gssr = IXGBE_GSSR_PHY0_SM;
271 323
272 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 324 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
273 status = IXGBE_ERR_SWFW_SYNC; 325 status = IXGBE_ERR_SWFW_SYNC;
274 326
275 if (status == 0) { 327 if (status == 0) {
@@ -336,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
336 } 388 }
337 } 389 }
338 390
339 ixgbe_release_swfw_sync(hw, gssr); 391 hw->mac.ops.release_swfw_sync(hw, gssr);
340 } 392 }
341 393
342 return status; 394 return status;
@@ -556,11 +608,10 @@ out:
556} 608}
557 609
558/** 610/**
559 * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns 611 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
560 * the PHY type.
561 * @hw: pointer to hardware structure 612 * @hw: pointer to hardware structure
562 * 613 *
563 * Searches for and indentifies the SFP module. Assings appropriate PHY type. 614 * Searches for and identifies the SFP module and assigns appropriate PHY type.
564 **/ 615 **/
565s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 616s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
566{ 617{
@@ -581,41 +632,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
581 goto out; 632 goto out;
582 } 633 }
583 634
584 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, 635 status = hw->phy.ops.read_i2c_eeprom(hw,
636 IXGBE_SFF_IDENTIFIER,
585 &identifier); 637 &identifier);
586 638
587 if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) { 639 if (status == IXGBE_ERR_SWFW_SYNC ||
588 status = IXGBE_ERR_SFP_NOT_PRESENT; 640 status == IXGBE_ERR_I2C ||
589 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 641 status == IXGBE_ERR_SFP_NOT_PRESENT)
590 if (hw->phy.type != ixgbe_phy_nl) { 642 goto err_read_i2c_eeprom;
591 hw->phy.id = 0;
592 hw->phy.type = ixgbe_phy_unknown;
593 }
594 goto out;
595 }
596 643
597 if (identifier == IXGBE_SFF_IDENTIFIER_SFP) { 644 /* LAN ID is needed for sfp_type determination */
598 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, 645 hw->mac.ops.set_lan_id(hw);
599 &comp_codes_1g); 646
600 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, 647 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
601 &comp_codes_10g); 648 hw->phy.type = ixgbe_phy_sfp_unsupported;
602 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, 649 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
603 &cable_tech); 650 } else {
604 651 status = hw->phy.ops.read_i2c_eeprom(hw,
605 /* ID Module 652 IXGBE_SFF_1GBE_COMP_CODES,
606 * ========= 653 &comp_codes_1g);
607 * 0 SFP_DA_CU 654
608 * 1 SFP_SR 655 if (status == IXGBE_ERR_SWFW_SYNC ||
609 * 2 SFP_LR 656 status == IXGBE_ERR_I2C ||
610 * 3 SFP_DA_CORE0 - 82599-specific 657 status == IXGBE_ERR_SFP_NOT_PRESENT)
611 * 4 SFP_DA_CORE1 - 82599-specific 658 goto err_read_i2c_eeprom;
612 * 5 SFP_SR/LR_CORE0 - 82599-specific 659
613 * 6 SFP_SR/LR_CORE1 - 82599-specific 660 status = hw->phy.ops.read_i2c_eeprom(hw,
614 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific 661 IXGBE_SFF_10GBE_COMP_CODES,
615 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 662 &comp_codes_10g);
616 * 9 SFP_1g_cu_CORE0 - 82599-specific 663
617 * 10 SFP_1g_cu_CORE1 - 82599-specific 664 if (status == IXGBE_ERR_SWFW_SYNC ||
618 */ 665 status == IXGBE_ERR_I2C ||
666 status == IXGBE_ERR_SFP_NOT_PRESENT)
667 goto err_read_i2c_eeprom;
668 status = hw->phy.ops.read_i2c_eeprom(hw,
669 IXGBE_SFF_CABLE_TECHNOLOGY,
670 &cable_tech);
671
672 if (status == IXGBE_ERR_SWFW_SYNC ||
673 status == IXGBE_ERR_I2C ||
674 status == IXGBE_ERR_SFP_NOT_PRESENT)
675 goto err_read_i2c_eeprom;
676
677 /* ID Module
678 * =========
679 * 0 SFP_DA_CU
680 * 1 SFP_SR
681 * 2 SFP_LR
682 * 3 SFP_DA_CORE0 - 82599-specific
683 * 4 SFP_DA_CORE1 - 82599-specific
684 * 5 SFP_SR/LR_CORE0 - 82599-specific
685 * 6 SFP_SR/LR_CORE1 - 82599-specific
686 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
687 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
688 * 9 SFP_1g_cu_CORE0 - 82599-specific
689 * 10 SFP_1g_cu_CORE1 - 82599-specific
690 */
619 if (hw->mac.type == ixgbe_mac_82598EB) { 691 if (hw->mac.type == ixgbe_mac_82598EB) {
620 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 692 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
621 hw->phy.sfp_type = ixgbe_sfp_type_da_cu; 693 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -647,31 +719,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
647 ixgbe_sfp_type_da_act_lmt_core1; 719 ixgbe_sfp_type_da_act_lmt_core1;
648 } else { 720 } else {
649 hw->phy.sfp_type = 721 hw->phy.sfp_type =
650 ixgbe_sfp_type_unknown; 722 ixgbe_sfp_type_unknown;
651 } 723 }
652 } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 724 } else if (comp_codes_10g &
653 if (hw->bus.lan_id == 0) 725 (IXGBE_SFF_10GBASESR_CAPABLE |
654 hw->phy.sfp_type = 726 IXGBE_SFF_10GBASELR_CAPABLE)) {
655 ixgbe_sfp_type_srlr_core0;
656 else
657 hw->phy.sfp_type =
658 ixgbe_sfp_type_srlr_core1;
659 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
660 if (hw->bus.lan_id == 0) 727 if (hw->bus.lan_id == 0)
661 hw->phy.sfp_type = 728 hw->phy.sfp_type =
662 ixgbe_sfp_type_srlr_core0; 729 ixgbe_sfp_type_srlr_core0;
663 else 730 else
664 hw->phy.sfp_type = 731 hw->phy.sfp_type =
665 ixgbe_sfp_type_srlr_core1; 732 ixgbe_sfp_type_srlr_core1;
666 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) 733 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
667 if (hw->bus.lan_id == 0) 734 if (hw->bus.lan_id == 0)
668 hw->phy.sfp_type = 735 hw->phy.sfp_type =
669 ixgbe_sfp_type_1g_cu_core0; 736 ixgbe_sfp_type_1g_cu_core0;
670 else 737 else
671 hw->phy.sfp_type = 738 hw->phy.sfp_type =
672 ixgbe_sfp_type_1g_cu_core1; 739 ixgbe_sfp_type_1g_cu_core1;
673 else 740 } else {
674 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 741 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
742 }
675 } 743 }
676 744
677 if (hw->phy.sfp_type != stored_sfp_type) 745 if (hw->phy.sfp_type != stored_sfp_type)
@@ -688,16 +756,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
688 /* Determine PHY vendor */ 756 /* Determine PHY vendor */
689 if (hw->phy.type != ixgbe_phy_nl) { 757 if (hw->phy.type != ixgbe_phy_nl) {
690 hw->phy.id = identifier; 758 hw->phy.id = identifier;
691 hw->phy.ops.read_i2c_eeprom(hw, 759 status = hw->phy.ops.read_i2c_eeprom(hw,
692 IXGBE_SFF_VENDOR_OUI_BYTE0, 760 IXGBE_SFF_VENDOR_OUI_BYTE0,
693 &oui_bytes[0]); 761 &oui_bytes[0]);
694 hw->phy.ops.read_i2c_eeprom(hw, 762
763 if (status == IXGBE_ERR_SWFW_SYNC ||
764 status == IXGBE_ERR_I2C ||
765 status == IXGBE_ERR_SFP_NOT_PRESENT)
766 goto err_read_i2c_eeprom;
767
768 status = hw->phy.ops.read_i2c_eeprom(hw,
695 IXGBE_SFF_VENDOR_OUI_BYTE1, 769 IXGBE_SFF_VENDOR_OUI_BYTE1,
696 &oui_bytes[1]); 770 &oui_bytes[1]);
697 hw->phy.ops.read_i2c_eeprom(hw, 771
772 if (status == IXGBE_ERR_SWFW_SYNC ||
773 status == IXGBE_ERR_I2C ||
774 status == IXGBE_ERR_SFP_NOT_PRESENT)
775 goto err_read_i2c_eeprom;
776
777 status = hw->phy.ops.read_i2c_eeprom(hw,
698 IXGBE_SFF_VENDOR_OUI_BYTE2, 778 IXGBE_SFF_VENDOR_OUI_BYTE2,
699 &oui_bytes[2]); 779 &oui_bytes[2]);
700 780
781 if (status == IXGBE_ERR_SWFW_SYNC ||
782 status == IXGBE_ERR_I2C ||
783 status == IXGBE_ERR_SFP_NOT_PRESENT)
784 goto err_read_i2c_eeprom;
785
701 vendor_oui = 786 vendor_oui =
702 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | 787 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
703 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | 788 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
@@ -707,7 +792,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
707 case IXGBE_SFF_VENDOR_OUI_TYCO: 792 case IXGBE_SFF_VENDOR_OUI_TYCO:
708 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 793 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
709 hw->phy.type = 794 hw->phy.type =
710 ixgbe_phy_sfp_passive_tyco; 795 ixgbe_phy_sfp_passive_tyco;
711 break; 796 break;
712 case IXGBE_SFF_VENDOR_OUI_FTL: 797 case IXGBE_SFF_VENDOR_OUI_FTL:
713 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 798 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -724,7 +809,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
724 default: 809 default:
725 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 810 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
726 hw->phy.type = 811 hw->phy.type =
727 ixgbe_phy_sfp_passive_unknown; 812 ixgbe_phy_sfp_passive_unknown;
728 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 813 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
729 hw->phy.type = 814 hw->phy.type =
730 ixgbe_phy_sfp_active_unknown; 815 ixgbe_phy_sfp_active_unknown;
@@ -734,7 +819,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
734 } 819 }
735 } 820 }
736 821
737 /* All passive DA cables are supported */ 822 /* Allow any DA cable vendor */
738 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | 823 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
739 IXGBE_SFF_DA_ACTIVE_CABLE)) { 824 IXGBE_SFF_DA_ACTIVE_CABLE)) {
740 status = 0; 825 status = 0;
@@ -756,7 +841,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
756 goto out; 841 goto out;
757 } 842 }
758 843
759 /* This is guaranteed to be 82599, no need to check for NULL */
760 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 844 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
761 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 845 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
762 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || 846 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
@@ -776,15 +860,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
776 860
777out: 861out:
778 return status; 862 return status;
863
864err_read_i2c_eeprom:
865 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
866 if (hw->phy.type != ixgbe_phy_nl) {
867 hw->phy.id = 0;
868 hw->phy.type = ixgbe_phy_unknown;
869 }
870 return IXGBE_ERR_SFP_NOT_PRESENT;
779} 871}
780 872
781/** 873/**
782 * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see 874 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
783 * if it supports a given SFP+ module type, if so it returns the offsets to the
784 * phy init sequence block.
785 * @hw: pointer to hardware structure 875 * @hw: pointer to hardware structure
786 * @list_offset: offset to the SFP ID list 876 * @list_offset: offset to the SFP ID list
787 * @data_offset: offset to the SFP data block 877 * @data_offset: offset to the SFP data block
878 *
879 * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
880 * so it returns the offsets to the phy init sequence block.
788 **/ 881 **/
789s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 882s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
790 u16 *list_offset, 883 u16 *list_offset,
@@ -899,11 +992,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
899 u8 dev_addr, u8 *data) 992 u8 dev_addr, u8 *data)
900{ 993{
901 s32 status = 0; 994 s32 status = 0;
902 u32 max_retry = 1; 995 u32 max_retry = 10;
903 u32 retry = 0; 996 u32 retry = 0;
997 u16 swfw_mask = 0;
904 bool nack = 1; 998 bool nack = 1;
905 999
1000 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1001 swfw_mask = IXGBE_GSSR_PHY1_SM;
1002 else
1003 swfw_mask = IXGBE_GSSR_PHY0_SM;
1004
906 do { 1005 do {
1006 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
1007 status = IXGBE_ERR_SWFW_SYNC;
1008 goto read_byte_out;
1009 }
1010
907 ixgbe_i2c_start(hw); 1011 ixgbe_i2c_start(hw);
908 1012
909 /* Device Address and write indication */ 1013 /* Device Address and write indication */
@@ -946,6 +1050,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
946 break; 1050 break;
947 1051
948fail: 1052fail:
1053 ixgbe_release_swfw_sync(hw, swfw_mask);
1054 msleep(100);
949 ixgbe_i2c_bus_clear(hw); 1055 ixgbe_i2c_bus_clear(hw);
950 retry++; 1056 retry++;
951 if (retry < max_retry) 1057 if (retry < max_retry)
@@ -955,6 +1061,9 @@ fail:
955 1061
956 } while (retry < max_retry); 1062 } while (retry < max_retry);
957 1063
1064 ixgbe_release_swfw_sync(hw, swfw_mask);
1065
1066read_byte_out:
958 return status; 1067 return status;
959} 1068}
960 1069
@@ -973,6 +1082,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
973 s32 status = 0; 1082 s32 status = 0;
974 u32 max_retry = 1; 1083 u32 max_retry = 1;
975 u32 retry = 0; 1084 u32 retry = 0;
1085 u16 swfw_mask = 0;
1086
1087 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1088 swfw_mask = IXGBE_GSSR_PHY1_SM;
1089 else
1090 swfw_mask = IXGBE_GSSR_PHY0_SM;
1091
1092 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
1093 status = IXGBE_ERR_SWFW_SYNC;
1094 goto write_byte_out;
1095 }
976 1096
977 do { 1097 do {
978 ixgbe_i2c_start(hw); 1098 ixgbe_i2c_start(hw);
@@ -1013,6 +1133,9 @@ fail:
1013 hw_dbg(hw, "I2C byte write error.\n"); 1133 hw_dbg(hw, "I2C byte write error.\n");
1014 } while (retry < max_retry); 1134 } while (retry < max_retry);
1015 1135
1136 ixgbe_release_swfw_sync(hw, swfw_mask);
1137
1138write_byte_out:
1016 return status; 1139 return status;
1017} 1140}
1018 1141
@@ -1331,6 +1454,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1331 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1454 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1332 u32 i; 1455 u32 i;
1333 1456
1457 ixgbe_i2c_start(hw);
1458
1334 ixgbe_set_i2c_data(hw, &i2cctl, 1); 1459 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1335 1460
1336 for (i = 0; i < 9; i++) { 1461 for (i = 0; i < 9; i++) {
@@ -1345,6 +1470,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1345 udelay(IXGBE_I2C_T_LOW); 1470 udelay(IXGBE_I2C_T_LOW);
1346 } 1471 }
1347 1472
1473 ixgbe_i2c_start(hw);
1474
1348 /* Put the i2c bus back to default state */ 1475 /* Put the i2c bus back to default state */
1349 ixgbe_i2c_stop(hw); 1476 ixgbe_i2c_stop(hw);
1350} 1477}
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index e2c6b7eac641..9bf2783d7a74 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -58,6 +58,10 @@
58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
60 60
61/* Flow control defines */
62#define IXGBE_TAF_SYM_PAUSE 0x400
63#define IXGBE_TAF_ASM_PAUSE 0x800
64
61/* Bit-shift macros */ 65/* Bit-shift macros */
62#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 66#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
63#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 67#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index fb4868d0a32d..58c9b45989ff 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 49dc14debef7..e7dd029d576a 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index ab65d13969fd..f190a4a8faf4 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -91,7 +91,7 @@
91 91
92/* General Receive Control */ 92/* General Receive Control */
93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ 93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
94#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */ 94#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
95 95
96#define IXGBE_VPDDIAG0 0x10204 96#define IXGBE_VPDDIAG0 0x10204
97#define IXGBE_VPDDIAG1 0x10208 97#define IXGBE_VPDDIAG1 0x10208
@@ -342,7 +342,7 @@
342/* Wake Up Control */ 342/* Wake Up Control */
343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ 343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ 344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
345#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/ 345#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
346 346
347/* Wake Up Filter Control */ 347/* Wake Up Filter Control */
348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -659,6 +659,8 @@
659#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ 659#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
660#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ 660#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
661#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ 661#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
662#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
663#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
662#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ 664#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
663#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ 665#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
664#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ 666#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
@@ -669,6 +671,11 @@
669#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ 671#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
670#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ 672#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
671#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ 673#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
674#define IXGBE_PCRC8ECL 0x0E810
675#define IXGBE_PCRC8ECH 0x0E811
676#define IXGBE_PCRC8ECH_MASK 0x1F
677#define IXGBE_LDPCECL 0x0E820
678#define IXGBE_LDPCECH 0x0E821
672 679
673/* Management */ 680/* Management */
674#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ 681#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1614,6 +1621,8 @@
1614#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ 1621#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1615 1622
1616/* PCI Bus Info */ 1623/* PCI Bus Info */
1624#define IXGBE_PCI_DEVICE_STATUS 0xAA
1625#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
1617#define IXGBE_PCI_LINK_STATUS 0xB2 1626#define IXGBE_PCI_LINK_STATUS 0xB2
1618#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1627#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
1619#define IXGBE_PCI_LINK_WIDTH 0x3F0 1628#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -2242,6 +2251,7 @@ enum ixgbe_mac_type {
2242 2251
2243enum ixgbe_phy_type { 2252enum ixgbe_phy_type {
2244 ixgbe_phy_unknown = 0, 2253 ixgbe_phy_unknown = 0,
2254 ixgbe_phy_none,
2245 ixgbe_phy_tn, 2255 ixgbe_phy_tn,
2246 ixgbe_phy_aq, 2256 ixgbe_phy_aq,
2247 ixgbe_phy_cu_unknown, 2257 ixgbe_phy_cu_unknown,
@@ -2330,32 +2340,31 @@ enum ixgbe_bus_type {
2330/* PCI bus speeds */ 2340/* PCI bus speeds */
2331enum ixgbe_bus_speed { 2341enum ixgbe_bus_speed {
2332 ixgbe_bus_speed_unknown = 0, 2342 ixgbe_bus_speed_unknown = 0,
2333 ixgbe_bus_speed_33, 2343 ixgbe_bus_speed_33 = 33,
2334 ixgbe_bus_speed_66, 2344 ixgbe_bus_speed_66 = 66,
2335 ixgbe_bus_speed_100, 2345 ixgbe_bus_speed_100 = 100,
2336 ixgbe_bus_speed_120, 2346 ixgbe_bus_speed_120 = 120,
2337 ixgbe_bus_speed_133, 2347 ixgbe_bus_speed_133 = 133,
2338 ixgbe_bus_speed_2500, 2348 ixgbe_bus_speed_2500 = 2500,
2339 ixgbe_bus_speed_5000, 2349 ixgbe_bus_speed_5000 = 5000,
2340 ixgbe_bus_speed_reserved 2350 ixgbe_bus_speed_reserved
2341}; 2351};
2342 2352
2343/* PCI bus widths */ 2353/* PCI bus widths */
2344enum ixgbe_bus_width { 2354enum ixgbe_bus_width {
2345 ixgbe_bus_width_unknown = 0, 2355 ixgbe_bus_width_unknown = 0,
2346 ixgbe_bus_width_pcie_x1, 2356 ixgbe_bus_width_pcie_x1 = 1,
2347 ixgbe_bus_width_pcie_x2, 2357 ixgbe_bus_width_pcie_x2 = 2,
2348 ixgbe_bus_width_pcie_x4 = 4, 2358 ixgbe_bus_width_pcie_x4 = 4,
2349 ixgbe_bus_width_pcie_x8 = 8, 2359 ixgbe_bus_width_pcie_x8 = 8,
2350 ixgbe_bus_width_32, 2360 ixgbe_bus_width_32 = 32,
2351 ixgbe_bus_width_64, 2361 ixgbe_bus_width_64 = 64,
2352 ixgbe_bus_width_reserved 2362 ixgbe_bus_width_reserved
2353}; 2363};
2354 2364
2355struct ixgbe_addr_filter_info { 2365struct ixgbe_addr_filter_info {
2356 u32 num_mc_addrs; 2366 u32 num_mc_addrs;
2357 u32 rar_used_count; 2367 u32 rar_used_count;
2358 u32 mc_addr_in_rar_count;
2359 u32 mta_in_use; 2368 u32 mta_in_use;
2360 u32 overflow_promisc; 2369 u32 overflow_promisc;
2361 bool uc_set_promisc; 2370 bool uc_set_promisc;
@@ -2493,6 +2502,8 @@ struct ixgbe_mac_operations {
2493 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); 2502 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
2494 s32 (*setup_sfp)(struct ixgbe_hw *); 2503 s32 (*setup_sfp)(struct ixgbe_hw *);
2495 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2504 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2505 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
2506 void (*release_swfw_sync)(struct ixgbe_hw *, u16);
2496 2507
2497 /* Link */ 2508 /* Link */
2498 void (*disable_tx_laser)(struct ixgbe_hw *); 2509 void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2515,7 +2526,6 @@ struct ixgbe_mac_operations {
2515 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2526 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2516 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2527 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2517 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2528 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2518 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2519 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); 2529 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2520 s32 (*enable_mc)(struct ixgbe_hw *); 2530 s32 (*enable_mc)(struct ixgbe_hw *);
2521 s32 (*disable_mc)(struct ixgbe_hw *); 2531 s32 (*disable_mc)(struct ixgbe_hw *);
@@ -2556,6 +2566,7 @@ struct ixgbe_eeprom_info {
2556 u16 address_bits; 2566 u16 address_bits;
2557}; 2567};
2558 2568
2569#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
2559struct ixgbe_mac_info { 2570struct ixgbe_mac_info {
2560 struct ixgbe_mac_operations ops; 2571 struct ixgbe_mac_operations ops;
2561 enum ixgbe_mac_type type; 2572 enum ixgbe_mac_type type;
@@ -2566,6 +2577,8 @@ struct ixgbe_mac_info {
2566 u16 wwnn_prefix; 2577 u16 wwnn_prefix;
2567 /* prefix for World Wide Port Name (WWPN) */ 2578 /* prefix for World Wide Port Name (WWPN) */
2568 u16 wwpn_prefix; 2579 u16 wwpn_prefix;
2580#define IXGBE_MAX_MTA 128
2581 u32 mta_shadow[IXGBE_MAX_MTA];
2569 s32 mc_filter_type; 2582 s32 mc_filter_type;
2570 u32 mcft_size; 2583 u32 mcft_size;
2571 u32 vft_size; 2584 u32 vft_size;
@@ -2578,6 +2591,7 @@ struct ixgbe_mac_info {
2578 u32 orig_autoc2; 2591 u32 orig_autoc2;
2579 bool orig_link_settings_stored; 2592 bool orig_link_settings_stored;
2580 bool autotry_restart; 2593 bool autotry_restart;
2594 u8 flags;
2581}; 2595};
2582 2596
2583struct ixgbe_phy_info { 2597struct ixgbe_phy_info {
@@ -2684,7 +2698,9 @@ struct ixgbe_info {
2684#define IXGBE_ERR_EEPROM_VERSION -24 2698#define IXGBE_ERR_EEPROM_VERSION -24
2685#define IXGBE_ERR_NO_SPACE -25 2699#define IXGBE_ERR_NO_SPACE -25
2686#define IXGBE_ERR_OVERTEMP -26 2700#define IXGBE_ERR_OVERTEMP -26
2687#define IXGBE_ERR_RAR_INDEX -27 2701#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
2702#define IXGBE_ERR_FC_NOT_SUPPORTED -28
2703#define IXGBE_ERR_FLOW_CONTROL -29
2688#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 2704#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2689#define IXGBE_ERR_PBA_SECTION -31 2705#define IXGBE_ERR_PBA_SECTION -31
2690#define IXGBE_ERR_INVALID_ARGUMENT -32 2706#define IXGBE_ERR_INVALID_ARGUMENT -32
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index f2518b01067d..f47e93fe32be 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,6 @@
31 31
32#include "ixgbe.h" 32#include "ixgbe.h"
33#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
34//#include "ixgbe_mbx.h"
35 34
36#define IXGBE_X540_MAX_TX_QUEUES 128 35#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128 36#define IXGBE_X540_MAX_RX_QUEUES 128
@@ -110,12 +109,9 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
110 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 109 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
111 * access and verify no pending requests before reset 110 * access and verify no pending requests before reset
112 */ 111 */
113 status = ixgbe_disable_pcie_master(hw); 112 ixgbe_disable_pcie_master(hw);
114 if (status != 0) {
115 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
116 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
117 }
118 113
114mac_reset_top:
119 /* 115 /*
120 * Issue global reset to the MAC. Needs to be SW reset if link is up. 116 * Issue global reset to the MAC. Needs to be SW reset if link is up.
121 * If link reset is used when link is up, it might reset the PHY when 117 * If link reset is used when link is up, it might reset the PHY when
@@ -148,6 +144,19 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 144 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 145 }
150 146
147 /*
148 * Double resets are required for recovery from certain error
149 * conditions. Between resets, it is necessary to stall to allow time
150 * for any pending HW events to complete. We use 1usec since that is
151 * what is needed for ixgbe_disable_pcie_master(). The second reset
152 * then clears out any effects of those events.
153 */
154 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
155 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
156 udelay(1);
157 goto mac_reset_top;
158 }
159
151 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ 160 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 161 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 162 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -191,7 +200,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
191 * clear the multicast table. Also reset num_rar_entries to 128, 200 * clear the multicast table. Also reset num_rar_entries to 128,
192 * since we modify this value when programming the SAN MAC address. 201 * since we modify this value when programming the SAN MAC address.
193 */ 202 */
194 hw->mac.num_rar_entries = 128; 203 hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
195 hw->mac.ops.init_rx_addrs(hw); 204 hw->mac.ops.init_rx_addrs(hw);
196 205
197 /* Store the permanent mac address */ 206 /* Store the permanent mac address */
@@ -242,8 +251,11 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
242} 251}
243 252
244/** 253/**
245 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params 254 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
246 * @hw: pointer to hardware structure 255 * @hw: pointer to hardware structure
256 *
257 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
258 * ixgbe_hw struct in order to set up EEPROM access.
247 **/ 259 **/
248static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) 260static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
249{ 261{
@@ -262,7 +274,7 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
262 IXGBE_EEPROM_WORD_SIZE_SHIFT); 274 IXGBE_EEPROM_WORD_SIZE_SHIFT);
263 275
264 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", 276 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
265 eeprom->type, eeprom->word_size); 277 eeprom->type, eeprom->word_size);
266 } 278 }
267 279
268 return 0; 280 return 0;
@@ -278,7 +290,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
278{ 290{
279 s32 status; 291 s32 status;
280 292
281 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) 293 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
282 status = ixgbe_read_eerd_generic(hw, offset, data); 294 status = ixgbe_read_eerd_generic(hw, offset, data);
283 else 295 else
284 status = IXGBE_ERR_SWFW_SYNC; 296 status = IXGBE_ERR_SWFW_SYNC;
@@ -311,7 +323,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
311 (data << IXGBE_EEPROM_RW_REG_DATA) | 323 (data << IXGBE_EEPROM_RW_REG_DATA) |
312 IXGBE_EEPROM_RW_REG_START; 324 IXGBE_EEPROM_RW_REG_START;
313 325
314 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) { 326 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
315 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 327 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
316 if (status != 0) { 328 if (status != 0) {
317 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 329 hw_dbg(hw, "Eeprom write EEWR timed out\n");
@@ -676,7 +688,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
676 .set_vmdq = &ixgbe_set_vmdq_generic, 688 .set_vmdq = &ixgbe_set_vmdq_generic,
677 .clear_vmdq = &ixgbe_clear_vmdq_generic, 689 .clear_vmdq = &ixgbe_clear_vmdq_generic,
678 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 690 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
679 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
680 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 691 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
681 .enable_mc = &ixgbe_enable_mc_generic, 692 .enable_mc = &ixgbe_enable_mc_generic,
682 .disable_mc = &ixgbe_disable_mc_generic, 693 .disable_mc = &ixgbe_disable_mc_generic,
@@ -687,6 +698,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
687 .setup_sfp = NULL, 698 .setup_sfp = NULL,
688 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 699 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
689 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 700 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
701 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
702 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
690}; 703};
691 704
692static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 705static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
@@ -702,7 +715,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
702 .identify = &ixgbe_identify_phy_generic, 715 .identify = &ixgbe_identify_phy_generic,
703 .identify_sfp = &ixgbe_identify_sfp_module_generic, 716 .identify_sfp = &ixgbe_identify_sfp_module_generic,
704 .init = NULL, 717 .init = NULL,
705 .reset = &ixgbe_reset_phy_generic, 718 .reset = NULL,
706 .read_reg = &ixgbe_read_phy_reg_generic, 719 .read_reg = &ixgbe_read_phy_reg_generic,
707 .write_reg = &ixgbe_write_phy_reg_generic, 720 .write_reg = &ixgbe_write_phy_reg_generic,
708 .setup_link = &ixgbe_setup_phy_link_generic, 721 .setup_link = &ixgbe_setup_phy_link_generic,
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 43af761cdb16..82768812552d 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -178,8 +178,6 @@ static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
178 tx_ring->tx_buffer_info[eop].time_stamp && 178 tx_ring->tx_buffer_info[eop].time_stamp &&
179 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) { 179 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
180 /* detected Tx unit hang */ 180 /* detected Tx unit hang */
181 union ixgbe_adv_tx_desc *tx_desc;
182 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
183 printk(KERN_ERR "Detected Tx Unit Hang\n" 181 printk(KERN_ERR "Detected Tx Unit Hang\n"
184 " Tx Queue <%d>\n" 182 " Tx Queue <%d>\n"
185 " TDH, TDT <%x>, <%x>\n" 183 " TDH, TDT <%x>, <%x>\n"
@@ -334,7 +332,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
334 struct ixgbevf_adapter *adapter = q_vector->adapter; 332 struct ixgbevf_adapter *adapter = q_vector->adapter;
335 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 333 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
336 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 334 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
337 int ret;
338 335
339 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 336 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
340 if (adapter->vlgrp && is_vlan) 337 if (adapter->vlgrp && is_vlan)
@@ -345,9 +342,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
345 napi_gro_receive(&q_vector->napi, skb); 342 napi_gro_receive(&q_vector->napi, skb);
346 } else { 343 } else {
347 if (adapter->vlgrp && is_vlan) 344 if (adapter->vlgrp && is_vlan)
348 ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 345 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
349 else 346 else
350 ret = netif_rx(skb); 347 netif_rx(skb);
351 } 348 }
352} 349}
353 350
@@ -2221,7 +2218,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2221 2218
2222 hw->vendor_id = pdev->vendor; 2219 hw->vendor_id = pdev->vendor;
2223 hw->device_id = pdev->device; 2220 hw->device_id = pdev->device;
2224 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2221 hw->revision_id = pdev->revision;
2225 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2222 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2226 hw->subsystem_device_id = pdev->subsystem_device; 2223 hw->subsystem_device_id = pdev->subsystem_device;
2227 2224
@@ -3287,8 +3284,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3287 3284
3288static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3285static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3289{ 3286{
3290 struct ixgbevf_adapter *adapter;
3291 adapter = netdev_priv(dev);
3292 dev->netdev_ops = &ixgbe_netdev_ops; 3287 dev->netdev_ops = &ixgbe_netdev_ops;
3293 ixgbevf_set_ethtool_ops(dev); 3288 ixgbevf_set_ethtool_ops(dev);
3294 dev->watchdog_timeo = 5 * HZ; 3289 dev->watchdog_timeo = 5 * HZ;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 5b441b75e138..f690474f4409 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -3095,7 +3095,7 @@ jme_init_one(struct pci_dev *pdev,
3095 3095
3096 jme_clear_pm(jme); 3096 jme_clear_pm(jme);
3097 jme_set_phyfifo_5level(jme); 3097 jme_set_phyfifo_5level(jme);
3098 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->pcirev); 3098 jme->pcirev = pdev->revision;
3099 if (!jme->fpgaver) 3099 if (!jme->fpgaver)
3100 jme_phy_init(jme); 3100 jme_phy_init(jme);
3101 jme_phy_off(jme); 3101 jme_phy_off(jme);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e2191e..79ccb54ab00c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
260 for (i = 0; i < PHY_MAX_ADDR; i++) 260 for (i = 0; i < PHY_MAX_ADDR; i++)
261 bp->mii_bus->irq[i] = PHY_POLL; 261 bp->mii_bus->irq[i] = PHY_POLL;
262 262
263 platform_set_drvdata(bp->dev, bp->mii_bus); 263 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
264 264
265 if (mdiobus_register(bp->mii_bus)) 265 if (mdiobus_register(bp->mii_bus))
266 goto err_out_free_mdio_irq; 266 goto err_out_free_mdio_irq;
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 210b2b164b30..0a6c6a2e7550 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
354 if (!new_carrier) { 354 if (!new_carrier) {
355 netif_carrier_off(mii->dev); 355 netif_carrier_off(mii->dev);
356 if (ok_to_print) 356 if (ok_to_print)
357 printk(KERN_INFO "%s: link down\n", mii->dev->name); 357 netdev_info(mii->dev, "link down\n");
358 return 0; /* duplex did not change */ 358 return 0; /* duplex did not change */
359 } 359 }
360 360
@@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
381 duplex = 1; 381 duplex = 1;
382 382
383 if (ok_to_print) 383 if (ok_to_print)
384 printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", 384 netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
385 mii->dev->name, 385 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
386 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" : 386 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
387 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", 387 100 : 10,
388 duplex ? "full" : "half", 388 duplex ? "full" : "half",
389 lpa); 389 lpa);
390 390
391 if ((init_media) || (mii->full_duplex != duplex)) { 391 if ((init_media) || (mii->full_duplex != duplex)) {
392 mii->full_duplex = duplex; 392 mii->full_duplex = duplex;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 02076e16542a..34425b94452f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 */ 36 */
37 37
38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
38#include <linux/init.h> 40#include <linux/init.h>
39#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
40#include <linux/in.h> 42#include <linux/in.h>
@@ -627,9 +629,8 @@ err:
627 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 629 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
628 (RX_FIRST_DESC | RX_LAST_DESC)) { 630 (RX_FIRST_DESC | RX_LAST_DESC)) {
629 if (net_ratelimit()) 631 if (net_ratelimit())
630 dev_printk(KERN_ERR, &mp->dev->dev, 632 netdev_err(mp->dev,
631 "received packet spanning " 633 "received packet spanning multiple descriptors\n");
632 "multiple descriptors\n");
633 } 634 }
634 635
635 if (cmd_sts & ERROR_SUMMARY) 636 if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
868 869
869 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 870 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
870 txq->tx_dropped++; 871 txq->tx_dropped++;
871 dev_printk(KERN_DEBUG, &dev->dev, 872 netdev_printk(KERN_DEBUG, dev,
872 "failed to linearize skb with tiny " 873 "failed to linearize skb with tiny unaligned fragment\n");
873 "unaligned fragment\n");
874 return NETDEV_TX_BUSY; 874 return NETDEV_TX_BUSY;
875 } 875 }
876 876
877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
878 if (net_ratelimit()) 878 if (net_ratelimit())
879 dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); 879 netdev_err(dev, "tx queue full?!\n");
880 kfree_skb(skb); 880 kfree_skb(skb);
881 return NETDEV_TX_OK; 881 return NETDEV_TX_OK;
882 } 882 }
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
959 skb = __skb_dequeue(&txq->tx_skb); 959 skb = __skb_dequeue(&txq->tx_skb);
960 960
961 if (cmd_sts & ERROR_SUMMARY) { 961 if (cmd_sts & ERROR_SUMMARY) {
962 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 962 netdev_info(mp->dev, "tx error\n");
963 mp->dev->stats.tx_errors++; 963 mp->dev->stats.tx_errors++;
964 } 964 }
965 965
@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1122 int ret; 1122 int ret;
1123 1123
1124 if (smi_wait_ready(msp)) { 1124 if (smi_wait_ready(msp)) {
1125 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1125 pr_warn("SMI bus busy timeout\n");
1126 return -ETIMEDOUT; 1126 return -ETIMEDOUT;
1127 } 1127 }
1128 1128
1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1130 1130
1131 if (smi_wait_ready(msp)) { 1131 if (smi_wait_ready(msp)) {
1132 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1132 pr_warn("SMI bus busy timeout\n");
1133 return -ETIMEDOUT; 1133 return -ETIMEDOUT;
1134 } 1134 }
1135 1135
1136 ret = readl(smi_reg); 1136 ret = readl(smi_reg);
1137 if (!(ret & SMI_READ_VALID)) { 1137 if (!(ret & SMI_READ_VALID)) {
1138 printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n"); 1138 pr_warn("SMI bus read not valid\n");
1139 return -ENODEV; 1139 return -ENODEV;
1140 } 1140 }
1141 1141
@@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1148 void __iomem *smi_reg = msp->base + SMI_REG; 1148 void __iomem *smi_reg = msp->base + SMI_REG;
1149 1149
1150 if (smi_wait_ready(msp)) { 1150 if (smi_wait_ready(msp)) {
1151 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1151 pr_warn("SMI bus busy timeout\n");
1152 return -ETIMEDOUT; 1152 return -ETIMEDOUT;
1153 } 1153 }
1154 1154
@@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1156 (addr << 16) | (val & 0xffff), smi_reg); 1156 (addr << 16) | (val & 0xffff), smi_reg);
1157 1157
1158 if (smi_wait_ready(msp)) { 1158 if (smi_wait_ready(msp)) {
1159 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1159 pr_warn("SMI bus busy timeout\n");
1160 return -ETIMEDOUT; 1160 return -ETIMEDOUT;
1161 } 1161 }
1162 1162
@@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1566 if (netif_running(dev)) { 1566 if (netif_running(dev)) {
1567 mv643xx_eth_stop(dev); 1567 mv643xx_eth_stop(dev);
1568 if (mv643xx_eth_open(dev)) { 1568 if (mv643xx_eth_open(dev)) {
1569 dev_printk(KERN_ERR, &dev->dev, 1569 netdev_err(dev,
1570 "fatal error on re-opening device after " 1570 "fatal error on re-opening device after ring param change\n");
1571 "ring param change\n");
1572 return -ENOMEM; 1571 return -ENOMEM;
1573 } 1572 }
1574 } 1573 }
@@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1874 } 1873 }
1875 1874
1876 if (rxq->rx_desc_area == NULL) { 1875 if (rxq->rx_desc_area == NULL) {
1877 dev_printk(KERN_ERR, &mp->dev->dev, 1876 netdev_err(mp->dev,
1878 "can't allocate rx ring (%d bytes)\n", size); 1877 "can't allocate rx ring (%d bytes)\n", size);
1879 goto out; 1878 goto out;
1880 } 1879 }
@@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1884 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1883 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
1885 GFP_KERNEL); 1884 GFP_KERNEL);
1886 if (rxq->rx_skb == NULL) { 1885 if (rxq->rx_skb == NULL) {
1887 dev_printk(KERN_ERR, &mp->dev->dev, 1886 netdev_err(mp->dev, "can't allocate rx skb ring\n");
1888 "can't allocate rx skb ring\n");
1889 goto out_free; 1887 goto out_free;
1890 } 1888 }
1891 1889
@@ -1944,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
1944 } 1942 }
1945 1943
1946 if (rxq->rx_desc_count) { 1944 if (rxq->rx_desc_count) {
1947 dev_printk(KERN_ERR, &mp->dev->dev, 1945 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1948 "error freeing rx ring -- %d skbs stuck\n",
1949 rxq->rx_desc_count); 1946 rxq->rx_desc_count);
1950 } 1947 }
1951 1948
@@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1987 } 1984 }
1988 1985
1989 if (txq->tx_desc_area == NULL) { 1986 if (txq->tx_desc_area == NULL) {
1990 dev_printk(KERN_ERR, &mp->dev->dev, 1987 netdev_err(mp->dev,
1991 "can't allocate tx ring (%d bytes)\n", size); 1988 "can't allocate tx ring (%d bytes)\n", size);
1992 return -ENOMEM; 1989 return -ENOMEM;
1993 } 1990 }
@@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2093 if (netif_carrier_ok(dev)) { 2090 if (netif_carrier_ok(dev)) {
2094 int i; 2091 int i;
2095 2092
2096 printk(KERN_INFO "%s: link down\n", dev->name); 2093 netdev_info(dev, "link down\n");
2097 2094
2098 netif_carrier_off(dev); 2095 netif_carrier_off(dev);
2099 2096
@@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2124 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2121 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2125 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2122 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2126 2123
2127 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " 2124 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2128 "flow control %sabled\n", dev->name, 2125 speed, duplex ? "full" : "half", fc ? "en" : "dis");
2129 speed, duplex ? "full" : "half",
2130 fc ? "en" : "dis");
2131 2126
2132 if (!netif_carrier_ok(dev)) 2127 if (!netif_carrier_ok(dev))
2133 netif_carrier_on(dev); 2128 netif_carrier_on(dev);
@@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2337 err = request_irq(dev->irq, mv643xx_eth_irq, 2332 err = request_irq(dev->irq, mv643xx_eth_irq,
2338 IRQF_SHARED, dev->name, dev); 2333 IRQF_SHARED, dev->name, dev);
2339 if (err) { 2334 if (err) {
2340 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2335 netdev_err(dev, "can't assign irq\n");
2341 return -EAGAIN; 2336 return -EAGAIN;
2342 } 2337 }
2343 2338
@@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2483 */ 2478 */
2484 mv643xx_eth_stop(dev); 2479 mv643xx_eth_stop(dev);
2485 if (mv643xx_eth_open(dev)) { 2480 if (mv643xx_eth_open(dev)) {
2486 dev_printk(KERN_ERR, &dev->dev, 2481 netdev_err(dev,
2487 "fatal error on re-opening device after " 2482 "fatal error on re-opening device after MTU change\n");
2488 "MTU change\n");
2489 } 2483 }
2490 2484
2491 return 0; 2485 return 0;
@@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
2508{ 2502{
2509 struct mv643xx_eth_private *mp = netdev_priv(dev); 2503 struct mv643xx_eth_private *mp = netdev_priv(dev);
2510 2504
2511 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2505 netdev_info(dev, "tx timeout\n");
2512 2506
2513 schedule_work(&mp->tx_timeout_task); 2507 schedule_work(&mp->tx_timeout_task);
2514} 2508}
@@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2603 int ret; 2597 int ret;
2604 2598
2605 if (!mv643xx_eth_version_printed++) 2599 if (!mv643xx_eth_version_printed++)
2606 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2600 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2607 "driver version %s\n", mv643xx_eth_driver_version); 2601 mv643xx_eth_driver_version);
2608 2602
2609 ret = -EINVAL; 2603 ret = -EINVAL;
2610 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2604 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2871 2865
2872 pd = pdev->dev.platform_data; 2866 pd = pdev->dev.platform_data;
2873 if (pd == NULL) { 2867 if (pd == NULL) {
2874 dev_printk(KERN_ERR, &pdev->dev, 2868 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2875 "no mv643xx_eth_platform_data\n");
2876 return -ENODEV; 2869 return -ENODEV;
2877 } 2870 }
2878 2871
2879 if (pd->shared == NULL) { 2872 if (pd->shared == NULL) {
2880 dev_printk(KERN_ERR, &pdev->dev, 2873 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
2881 "no mv643xx_eth_platform_data->shared\n");
2882 return -ENODEV; 2874 return -ENODEV;
2883 } 2875 }
2884 2876
@@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2957 if (err) 2949 if (err)
2958 goto out; 2950 goto out;
2959 2951
2960 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2952 netdev_notice(dev, "port %d with MAC address %pM\n",
2961 mp->port_num, dev->dev_addr); 2953 mp->port_num, dev->dev_addr);
2962 2954
2963 if (mp->tx_desc_sram_size > 0) 2955 if (mp->tx_desc_sram_size > 0)
2964 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2956 netdev_notice(dev, "configured with sram\n");
2965 2957
2966 return 0; 2958 return 0;
2967 2959
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda4d054..530ab5a10bd3 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), 692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
694 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
694 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 695 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
695 PCMCIA_DEVICE_NULL, 696 PCMCIA_DEVICE_NULL,
696}; 697};
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 164cfad6ce79..1af549c89d51 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
175 struct pptp_opt *opt = &po->proto.pptp; 175 struct pptp_opt *opt = &po->proto.pptp;
176 struct pptp_gre_header *hdr; 176 struct pptp_gre_header *hdr;
177 unsigned int header_len = sizeof(*hdr); 177 unsigned int header_len = sizeof(*hdr);
178 int err = 0;
179 int islcp; 178 int islcp;
180 int len; 179 int len;
181 unsigned char *data; 180 unsigned char *data;
@@ -198,8 +197,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
198 .saddr = opt->src_addr.sin_addr.s_addr, 197 .saddr = opt->src_addr.sin_addr.s_addr,
199 .tos = RT_TOS(0) } }, 198 .tos = RT_TOS(0) } },
200 .proto = IPPROTO_GRE }; 199 .proto = IPPROTO_GRE };
201 err = ip_route_output_key(&init_net, &rt, &fl); 200 rt = ip_route_output_key(&init_net, &fl);
202 if (err) 201 if (IS_ERR(rt))
203 goto tx_error; 202 goto tx_error;
204 } 203 }
205 tdev = rt->dst.dev; 204 tdev = rt->dst.dev;
@@ -477,7 +476,8 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
477 .tos = RT_CONN_FLAGS(sk) } }, 476 .tos = RT_CONN_FLAGS(sk) } },
478 .proto = IPPROTO_GRE }; 477 .proto = IPPROTO_GRE };
479 security_sk_classify_flow(sk, &fl); 478 security_sk_classify_flow(sk, &fl);
480 if (ip_route_output_key(&init_net, &rt, &fl)) { 479 rt = ip_route_output_key(&init_net, &fl);
480 if (IS_ERR(rt)) {
481 error = -EHOSTUNREACH; 481 error = -EHOSTUNREACH;
482 goto end; 482 goto end;
483 } 483 }
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 1a3584edd79c..2d21c60085bc 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
379{ 379{
380 struct ql3xxx_port_registers __iomem *port_regs = 380 struct ql3xxx_port_registers __iomem *port_regs =
381 qdev->mem_map_registers; 381 qdev->mem_map_registers;
382 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 382 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
383 383
384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
398 u32 previousBit; 398 u32 previousBit;
399 struct ql3xxx_port_registers __iomem *port_regs = 399 struct ql3xxx_port_registers __iomem *port_regs =
400 qdev->mem_map_registers; 400 qdev->mem_map_registers;
401 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 401 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
402 402
403 /* Clock in a zero, then do the start bit */ 403 /* Clock in a zero, then do the start bit */
404 ql_write_nvram_reg(qdev, spir, 404 ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
467{ 467{
468 struct ql3xxx_port_registers __iomem *port_regs = 468 struct ql3xxx_port_registers __iomem *port_regs =
469 qdev->mem_map_registers; 469 qdev->mem_map_registers;
470 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 470 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
471 471
472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
483 u32 dataBit; 483 u32 dataBit;
484 struct ql3xxx_port_registers __iomem *port_regs = 484 struct ql3xxx_port_registers __iomem *port_regs =
485 qdev->mem_map_registers; 485 qdev->mem_map_registers;
486 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 486 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
487 487
488 /* Read the data bits */ 488 /* Read the data bits */
489 /* The first bit is a dummy. Clock right over it. */ 489 /* The first bit is a dummy. Clock right over it. */
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3011 u32 value; 3011 u32 value;
3012 struct ql3xxx_port_registers __iomem *port_regs = 3012 struct ql3xxx_port_registers __iomem *port_regs =
3013 qdev->mem_map_registers; 3013 qdev->mem_map_registers;
3014 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3014 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3016 (void __iomem *)port_regs; 3016 (void __iomem *)port_regs;
3017 u32 delay = 10; 3017 u32 delay = 10;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 469ab0b7ce31..5e403511289d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <linux/pci-aspm.h>
28 29
29#include <asm/system.h> 30#include <asm/system.h>
30#include <asm/io.h> 31#include <asm/io.h>
@@ -36,6 +37,7 @@
36 37
37#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" 38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
38#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" 39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
40#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
39 41
40#ifdef RTL8169_DEBUG 42#ifdef RTL8169_DEBUG
41#define assert(expr) \ 43#define assert(expr) \
@@ -123,6 +125,8 @@ enum mac_version {
123 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D 125 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
124 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP 126 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
125 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP 127 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
128 RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
129 RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
126}; 130};
127 131
128#define _R(NAME,MAC,MASK) \ 132#define _R(NAME,MAC,MASK) \
@@ -160,7 +164,9 @@ static const struct {
160 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E 164 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
161 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E 165 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
162 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E 166 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
163 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880) // PCI-E 167 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
168 _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
169 _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E
164}; 170};
165#undef _R 171#undef _R
166 172
@@ -267,9 +273,15 @@ enum rtl8168_8101_registers {
267#define EPHYAR_REG_MASK 0x1f 273#define EPHYAR_REG_MASK 0x1f
268#define EPHYAR_REG_SHIFT 16 274#define EPHYAR_REG_SHIFT 16
269#define EPHYAR_DATA_MASK 0xffff 275#define EPHYAR_DATA_MASK 0xffff
276 DLLPR = 0xd0,
277#define PM_SWITCH (1 << 6)
270 DBG_REG = 0xd1, 278 DBG_REG = 0xd1,
271#define FIX_NAK_1 (1 << 4) 279#define FIX_NAK_1 (1 << 4)
272#define FIX_NAK_2 (1 << 3) 280#define FIX_NAK_2 (1 << 3)
281 TWSI = 0xd2,
282 MCU = 0xd3,
283#define EN_NDP (1 << 3)
284#define EN_OOB_RESET (1 << 2)
273 EFUSEAR = 0xdc, 285 EFUSEAR = 0xdc,
274#define EFUSEAR_FLAG 0x80000000 286#define EFUSEAR_FLAG 0x80000000
275#define EFUSEAR_WRITE_CMD 0x80000000 287#define EFUSEAR_WRITE_CMD 0x80000000
@@ -526,9 +538,6 @@ struct rtl8169_private {
526 u16 napi_event; 538 u16 napi_event;
527 u16 intr_mask; 539 u16 intr_mask;
528 int phy_1000_ctrl_reg; 540 int phy_1000_ctrl_reg;
529#ifdef CONFIG_R8169_VLAN
530 struct vlan_group *vlgrp;
531#endif
532 541
533 struct mdio_ops { 542 struct mdio_ops {
534 void (*write)(void __iomem *, int, int); 543 void (*write)(void __iomem *, int, int);
@@ -540,7 +549,7 @@ struct rtl8169_private {
540 void (*up)(struct rtl8169_private *); 549 void (*up)(struct rtl8169_private *);
541 } pll_power_ops; 550 } pll_power_ops;
542 551
543 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); 552 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
544 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 553 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
545 void (*phy_reset_enable)(struct rtl8169_private *tp); 554 void (*phy_reset_enable)(struct rtl8169_private *tp);
546 void (*hw_start)(struct net_device *); 555 void (*hw_start)(struct net_device *);
@@ -568,6 +577,7 @@ MODULE_LICENSE("GPL");
568MODULE_VERSION(RTL8169_VERSION); 577MODULE_VERSION(RTL8169_VERSION);
569MODULE_FIRMWARE(FIRMWARE_8168D_1); 578MODULE_FIRMWARE(FIRMWARE_8168D_1);
570MODULE_FIRMWARE(FIRMWARE_8168D_2); 579MODULE_FIRMWARE(FIRMWARE_8168D_2);
580MODULE_FIRMWARE(FIRMWARE_8105E_1);
571 581
572static int rtl8169_open(struct net_device *dev); 582static int rtl8169_open(struct net_device *dev);
573static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 583static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -617,8 +627,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
617 } 627 }
618} 628}
619 629
620static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) 630static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
621{ 631{
632 void __iomem *ioaddr = tp->mmio_addr;
622 int i; 633 int i;
623 634
624 RTL_W8(ERIDR, cmd); 635 RTL_W8(ERIDR, cmd);
@@ -630,7 +641,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
630 break; 641 break;
631 } 642 }
632 643
633 ocp_write(ioaddr, 0x1, 0x30, 0x00000001); 644 ocp_write(tp, 0x1, 0x30, 0x00000001);
634} 645}
635 646
636#define OOB_CMD_RESET 0x00 647#define OOB_CMD_RESET 0x00
@@ -1096,7 +1107,7 @@ static int rtl8169_get_regs_len(struct net_device *dev)
1096} 1107}
1097 1108
1098static int rtl8169_set_speed_tbi(struct net_device *dev, 1109static int rtl8169_set_speed_tbi(struct net_device *dev,
1099 u8 autoneg, u16 speed, u8 duplex) 1110 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1100{ 1111{
1101 struct rtl8169_private *tp = netdev_priv(dev); 1112 struct rtl8169_private *tp = netdev_priv(dev);
1102 void __iomem *ioaddr = tp->mmio_addr; 1113 void __iomem *ioaddr = tp->mmio_addr;
@@ -1119,17 +1130,30 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
1119} 1130}
1120 1131
1121static int rtl8169_set_speed_xmii(struct net_device *dev, 1132static int rtl8169_set_speed_xmii(struct net_device *dev,
1122 u8 autoneg, u16 speed, u8 duplex) 1133 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1123{ 1134{
1124 struct rtl8169_private *tp = netdev_priv(dev); 1135 struct rtl8169_private *tp = netdev_priv(dev);
1125 int giga_ctrl, bmcr; 1136 int giga_ctrl, bmcr;
1137 int rc = -EINVAL;
1138
1139 rtl_writephy(tp, 0x1f, 0x0000);
1126 1140
1127 if (autoneg == AUTONEG_ENABLE) { 1141 if (autoneg == AUTONEG_ENABLE) {
1128 int auto_nego; 1142 int auto_nego;
1129 1143
1130 auto_nego = rtl_readphy(tp, MII_ADVERTISE); 1144 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1131 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL | 1145 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1132 ADVERTISE_100HALF | ADVERTISE_100FULL); 1146 ADVERTISE_100HALF | ADVERTISE_100FULL);
1147
1148 if (adv & ADVERTISED_10baseT_Half)
1149 auto_nego |= ADVERTISE_10HALF;
1150 if (adv & ADVERTISED_10baseT_Full)
1151 auto_nego |= ADVERTISE_10FULL;
1152 if (adv & ADVERTISED_100baseT_Half)
1153 auto_nego |= ADVERTISE_100HALF;
1154 if (adv & ADVERTISED_100baseT_Full)
1155 auto_nego |= ADVERTISE_100FULL;
1156
1133 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1157 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1134 1158
1135 giga_ctrl = rtl_readphy(tp, MII_CTRL1000); 1159 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
@@ -1143,27 +1167,22 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1143 (tp->mac_version != RTL_GIGA_MAC_VER_13) && 1167 (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
1144 (tp->mac_version != RTL_GIGA_MAC_VER_14) && 1168 (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
1145 (tp->mac_version != RTL_GIGA_MAC_VER_15) && 1169 (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
1146 (tp->mac_version != RTL_GIGA_MAC_VER_16)) { 1170 (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
1147 giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; 1171 (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
1148 } else { 1172 (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
1173 if (adv & ADVERTISED_1000baseT_Half)
1174 giga_ctrl |= ADVERTISE_1000HALF;
1175 if (adv & ADVERTISED_1000baseT_Full)
1176 giga_ctrl |= ADVERTISE_1000FULL;
1177 } else if (adv & (ADVERTISED_1000baseT_Half |
1178 ADVERTISED_1000baseT_Full)) {
1149 netif_info(tp, link, dev, 1179 netif_info(tp, link, dev,
1150 "PHY does not support 1000Mbps\n"); 1180 "PHY does not support 1000Mbps\n");
1181 goto out;
1151 } 1182 }
1152 1183
1153 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; 1184 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1154 1185
1155 if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
1156 (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
1157 (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
1158 /*
1159 * Wake up the PHY.
1160 * Vendor specific (0x1f) and reserved (0x0e) MII
1161 * registers.
1162 */
1163 rtl_writephy(tp, 0x1f, 0x0000);
1164 rtl_writephy(tp, 0x0e, 0x0000);
1165 }
1166
1167 rtl_writephy(tp, MII_ADVERTISE, auto_nego); 1186 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1168 rtl_writephy(tp, MII_CTRL1000, giga_ctrl); 1187 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1169 } else { 1188 } else {
@@ -1174,12 +1193,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1174 else if (speed == SPEED_100) 1193 else if (speed == SPEED_100)
1175 bmcr = BMCR_SPEED100; 1194 bmcr = BMCR_SPEED100;
1176 else 1195 else
1177 return -EINVAL; 1196 goto out;
1178 1197
1179 if (duplex == DUPLEX_FULL) 1198 if (duplex == DUPLEX_FULL)
1180 bmcr |= BMCR_FULLDPLX; 1199 bmcr |= BMCR_FULLDPLX;
1181
1182 rtl_writephy(tp, 0x1f, 0x0000);
1183 } 1200 }
1184 1201
1185 tp->phy_1000_ctrl_reg = giga_ctrl; 1202 tp->phy_1000_ctrl_reg = giga_ctrl;
@@ -1197,16 +1214,18 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1197 } 1214 }
1198 } 1215 }
1199 1216
1200 return 0; 1217 rc = 0;
1218out:
1219 return rc;
1201} 1220}
1202 1221
1203static int rtl8169_set_speed(struct net_device *dev, 1222static int rtl8169_set_speed(struct net_device *dev,
1204 u8 autoneg, u16 speed, u8 duplex) 1223 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1205{ 1224{
1206 struct rtl8169_private *tp = netdev_priv(dev); 1225 struct rtl8169_private *tp = netdev_priv(dev);
1207 int ret; 1226 int ret;
1208 1227
1209 ret = tp->set_speed(dev, autoneg, speed, duplex); 1228 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1210 1229
1211 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) 1230 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1212 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); 1231 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -1221,7 +1240,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1221 int ret; 1240 int ret;
1222 1241
1223 spin_lock_irqsave(&tp->lock, flags); 1242 spin_lock_irqsave(&tp->lock, flags);
1224 ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex); 1243 ret = rtl8169_set_speed(dev,
1244 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1225 spin_unlock_irqrestore(&tp->lock, flags); 1245 spin_unlock_irqrestore(&tp->lock, flags);
1226 1246
1227 return ret; 1247 return ret;
@@ -1255,8 +1275,6 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1255 return 0; 1275 return 0;
1256} 1276}
1257 1277
1258#ifdef CONFIG_R8169_VLAN
1259
1260static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, 1278static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1261 struct sk_buff *skb) 1279 struct sk_buff *skb)
1262{ 1280{
@@ -1264,64 +1282,37 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1264 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 1282 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1265} 1283}
1266 1284
1267static void rtl8169_vlan_rx_register(struct net_device *dev, 1285#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
1268 struct vlan_group *grp) 1286
1287static void rtl8169_vlan_mode(struct net_device *dev)
1269{ 1288{
1270 struct rtl8169_private *tp = netdev_priv(dev); 1289 struct rtl8169_private *tp = netdev_priv(dev);
1271 void __iomem *ioaddr = tp->mmio_addr; 1290 void __iomem *ioaddr = tp->mmio_addr;
1272 unsigned long flags; 1291 unsigned long flags;
1273 1292
1274 spin_lock_irqsave(&tp->lock, flags); 1293 spin_lock_irqsave(&tp->lock, flags);
1275 tp->vlgrp = grp; 1294 if (dev->features & NETIF_F_HW_VLAN_RX)
1276 /*
1277 * Do not disable RxVlan on 8110SCd.
1278 */
1279 if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
1280 tp->cp_cmd |= RxVlan; 1295 tp->cp_cmd |= RxVlan;
1281 else 1296 else
1282 tp->cp_cmd &= ~RxVlan; 1297 tp->cp_cmd &= ~RxVlan;
1283 RTL_W16(CPlusCmd, tp->cp_cmd); 1298 RTL_W16(CPlusCmd, tp->cp_cmd);
1299 /* PCI commit */
1284 RTL_R16(CPlusCmd); 1300 RTL_R16(CPlusCmd);
1285 spin_unlock_irqrestore(&tp->lock, flags); 1301 spin_unlock_irqrestore(&tp->lock, flags);
1302
1303 dev->vlan_features = dev->features &~ NETIF_F_HW_VLAN_TX_RX;
1286} 1304}
1287 1305
1288static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1306static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1289 struct sk_buff *skb, int polling)
1290{ 1307{
1291 u32 opts2 = le32_to_cpu(desc->opts2); 1308 u32 opts2 = le32_to_cpu(desc->opts2);
1292 struct vlan_group *vlgrp = tp->vlgrp;
1293 int ret;
1294 1309
1295 if (vlgrp && (opts2 & RxVlanTag)) { 1310 if (opts2 & RxVlanTag)
1296 u16 vtag = swab16(opts2 & 0xffff); 1311 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1297 1312
1298 if (likely(polling))
1299 vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
1300 else
1301 __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
1302 ret = 0;
1303 } else
1304 ret = -1;
1305 desc->opts2 = 0; 1313 desc->opts2 = 0;
1306 return ret;
1307}
1308
1309#else /* !CONFIG_R8169_VLAN */
1310
1311static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1312 struct sk_buff *skb)
1313{
1314 return 0;
1315}
1316
1317static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1318 struct sk_buff *skb, int polling)
1319{
1320 return -1;
1321} 1314}
1322 1315
1323#endif
1324
1325static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 1316static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1326{ 1317{
1327 struct rtl8169_private *tp = netdev_priv(dev); 1318 struct rtl8169_private *tp = netdev_priv(dev);
@@ -1492,6 +1483,28 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1492 } 1483 }
1493} 1484}
1494 1485
1486static int rtl8169_set_flags(struct net_device *dev, u32 data)
1487{
1488 struct rtl8169_private *tp = netdev_priv(dev);
1489 unsigned long old_feat = dev->features;
1490 int rc;
1491
1492 if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
1493 !(data & ETH_FLAG_RXVLAN)) {
1494 netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
1495 return -EINVAL;
1496 }
1497
1498 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
1499 if (rc)
1500 return rc;
1501
1502 if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
1503 rtl8169_vlan_mode(dev);
1504
1505 return 0;
1506}
1507
1495static const struct ethtool_ops rtl8169_ethtool_ops = { 1508static const struct ethtool_ops rtl8169_ethtool_ops = {
1496 .get_drvinfo = rtl8169_get_drvinfo, 1509 .get_drvinfo = rtl8169_get_drvinfo,
1497 .get_regs_len = rtl8169_get_regs_len, 1510 .get_regs_len = rtl8169_get_regs_len,
@@ -1511,6 +1524,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1511 .get_strings = rtl8169_get_strings, 1524 .get_strings = rtl8169_get_strings,
1512 .get_sset_count = rtl8169_get_sset_count, 1525 .get_sset_count = rtl8169_get_sset_count,
1513 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1526 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1527 .set_flags = rtl8169_set_flags,
1528 .get_flags = ethtool_op_get_flags,
1514}; 1529};
1515 1530
1516static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1531static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1559,6 +1574,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1559 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1574 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1560 1575
1561 /* 8101 family. */ 1576 /* 8101 family. */
1577 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1578 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
1579 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
1562 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, 1580 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
1563 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, 1581 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
1564 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, 1582 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
@@ -2435,6 +2453,33 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2435 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2453 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2436} 2454}
2437 2455
2456static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
2457{
2458 static const struct phy_reg phy_reg_init[] = {
2459 { 0x1f, 0x0005 },
2460 { 0x1a, 0x0000 },
2461 { 0x1f, 0x0000 },
2462
2463 { 0x1f, 0x0004 },
2464 { 0x1c, 0x0000 },
2465 { 0x1f, 0x0000 },
2466
2467 { 0x1f, 0x0001 },
2468 { 0x15, 0x7701 },
2469 { 0x1f, 0x0000 }
2470 };
2471
2472 /* Disable ALDPS before ram code */
2473 rtl_writephy(tp, 0x1f, 0x0000);
2474 rtl_writephy(tp, 0x18, 0x0310);
2475 msleep(100);
2476
2477 if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
2478 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2479
2480 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2481}
2482
2438static void rtl_hw_phy_config(struct net_device *dev) 2483static void rtl_hw_phy_config(struct net_device *dev)
2439{ 2484{
2440 struct rtl8169_private *tp = netdev_priv(dev); 2485 struct rtl8169_private *tp = netdev_priv(dev);
@@ -2502,6 +2547,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
2502 case RTL_GIGA_MAC_VER_28: 2547 case RTL_GIGA_MAC_VER_28:
2503 rtl8168d_4_hw_phy_config(tp); 2548 rtl8168d_4_hw_phy_config(tp);
2504 break; 2549 break;
2550 case RTL_GIGA_MAC_VER_29:
2551 case RTL_GIGA_MAC_VER_30:
2552 rtl8105e_hw_phy_config(tp);
2553 break;
2505 2554
2506 default: 2555 default:
2507 break; 2556 break;
@@ -2633,11 +2682,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2633 2682
2634 rtl8169_phy_reset(dev, tp); 2683 rtl8169_phy_reset(dev, tp);
2635 2684
2636 /* 2685 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
2637 * rtl8169_set_speed_xmii takes good care of the Fast Ethernet 2686 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2638 * only 8101. Don't panic. 2687 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
2639 */ 2688 tp->mii.supports_gmii ?
2640 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL); 2689 ADVERTISED_1000baseT_Half |
2690 ADVERTISED_1000baseT_Full : 0);
2641 2691
2642 if (RTL_R8(PHYstatus) & TBI_Enable) 2692 if (RTL_R8(PHYstatus) & TBI_Enable)
2643 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 2693 netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2793,9 +2843,6 @@ static const struct net_device_ops rtl8169_netdev_ops = {
2793 .ndo_set_mac_address = rtl_set_mac_address, 2843 .ndo_set_mac_address = rtl_set_mac_address,
2794 .ndo_do_ioctl = rtl8169_ioctl, 2844 .ndo_do_ioctl = rtl8169_ioctl,
2795 .ndo_set_multicast_list = rtl_set_rx_mode, 2845 .ndo_set_multicast_list = rtl_set_rx_mode,
2796#ifdef CONFIG_R8169_VLAN
2797 .ndo_vlan_rx_register = rtl8169_vlan_rx_register,
2798#endif
2799#ifdef CONFIG_NET_POLL_CONTROLLER 2846#ifdef CONFIG_NET_POLL_CONTROLLER
2800 .ndo_poll_controller = rtl8169_netpoll, 2847 .ndo_poll_controller = rtl8169_netpoll,
2801#endif 2848#endif
@@ -2868,8 +2915,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2868{ 2915{
2869 void __iomem *ioaddr = tp->mmio_addr; 2916 void __iomem *ioaddr = tp->mmio_addr;
2870 2917
2871 if (tp->mac_version == RTL_GIGA_MAC_VER_27) 2918 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2919 (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
2920 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
2872 return; 2921 return;
2922 }
2873 2923
2874 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || 2924 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
2875 (tp->mac_version == RTL_GIGA_MAC_VER_24)) && 2925 (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2891,6 +2941,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2891 switch (tp->mac_version) { 2941 switch (tp->mac_version) {
2892 case RTL_GIGA_MAC_VER_25: 2942 case RTL_GIGA_MAC_VER_25:
2893 case RTL_GIGA_MAC_VER_26: 2943 case RTL_GIGA_MAC_VER_26:
2944 case RTL_GIGA_MAC_VER_27:
2945 case RTL_GIGA_MAC_VER_28:
2894 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 2946 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
2895 break; 2947 break;
2896 } 2948 }
@@ -2900,12 +2952,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
2900{ 2952{
2901 void __iomem *ioaddr = tp->mmio_addr; 2953 void __iomem *ioaddr = tp->mmio_addr;
2902 2954
2903 if (tp->mac_version == RTL_GIGA_MAC_VER_27) 2955 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2956 (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
2957 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
2904 return; 2958 return;
2959 }
2905 2960
2906 switch (tp->mac_version) { 2961 switch (tp->mac_version) {
2907 case RTL_GIGA_MAC_VER_25: 2962 case RTL_GIGA_MAC_VER_25:
2908 case RTL_GIGA_MAC_VER_26: 2963 case RTL_GIGA_MAC_VER_26:
2964 case RTL_GIGA_MAC_VER_27:
2965 case RTL_GIGA_MAC_VER_28:
2909 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 2966 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
2910 break; 2967 break;
2911 } 2968 }
@@ -2940,6 +2997,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
2940 case RTL_GIGA_MAC_VER_09: 2997 case RTL_GIGA_MAC_VER_09:
2941 case RTL_GIGA_MAC_VER_10: 2998 case RTL_GIGA_MAC_VER_10:
2942 case RTL_GIGA_MAC_VER_16: 2999 case RTL_GIGA_MAC_VER_16:
3000 case RTL_GIGA_MAC_VER_29:
3001 case RTL_GIGA_MAC_VER_30:
2943 ops->down = r810x_pll_power_down; 3002 ops->down = r810x_pll_power_down;
2944 ops->up = r810x_pll_power_up; 3003 ops->up = r810x_pll_power_up;
2945 break; 3004 break;
@@ -3009,6 +3068,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3009 mii->reg_num_mask = 0x1f; 3068 mii->reg_num_mask = 0x1f;
3010 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); 3069 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
3011 3070
3071 /* disable ASPM completely as that cause random device stop working
3072 * problems as well as full system hangs for some PCIe devices users */
3073 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3074 PCIE_LINK_STATE_CLKPM);
3075
3012 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 3076 /* enable device (incl. PCI PM wakeup and hotplug setup) */
3013 rc = pci_enable_device(pdev); 3077 rc = pci_enable_device(pdev);
3014 if (rc < 0) { 3078 if (rc < 0) {
@@ -3042,7 +3106,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3042 goto err_out_mwi_2; 3106 goto err_out_mwi_2;
3043 } 3107 }
3044 3108
3045 tp->cp_cmd = PCIMulRW | RxChkSum; 3109 tp->cp_cmd = RxChkSum;
3046 3110
3047 if ((sizeof(dma_addr_t) > 4) && 3111 if ((sizeof(dma_addr_t) > 4) &&
3048 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { 3112 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3087,6 +3151,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3087 /* Identify chip attached to board */ 3151 /* Identify chip attached to board */
3088 rtl8169_get_mac_version(tp, ioaddr); 3152 rtl8169_get_mac_version(tp, ioaddr);
3089 3153
3154 /*
3155 * Pretend we are using VLANs; This bypasses a nasty bug where
3156 * Interrupts stop flowing on high load on 8110SCd controllers.
3157 */
3158 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3159 tp->cp_cmd |= RxVlan;
3160
3090 rtl_init_mdio_ops(tp); 3161 rtl_init_mdio_ops(tp);
3091 rtl_init_pll_power_ops(tp); 3162 rtl_init_pll_power_ops(tp);
3092 3163
@@ -3155,10 +3226,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3155 3226
3156 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 3227 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
3157 3228
3158#ifdef CONFIG_R8169_VLAN 3229 dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO;
3159 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3160#endif
3161 dev->features |= NETIF_F_GRO;
3162 3230
3163 tp->intr_mask = 0xffff; 3231 tp->intr_mask = 0xffff;
3164 tp->hw_start = cfg->hw_start; 3232 tp->hw_start = cfg->hw_start;
@@ -3276,12 +3344,7 @@ static int rtl8169_open(struct net_device *dev)
3276 3344
3277 rtl8169_init_phy(dev, tp); 3345 rtl8169_init_phy(dev, tp);
3278 3346
3279 /* 3347 rtl8169_vlan_mode(dev);
3280 * Pretend we are using VLANs; This bypasses a nasty bug where
3281 * Interrupts stop flowing on high load on 8110SCd controllers.
3282 */
3283 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3284 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3285 3348
3286 rtl_pll_power_up(tp); 3349 rtl_pll_power_up(tp);
3287 3350
@@ -3318,7 +3381,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3318 /* Disable interrupts */ 3381 /* Disable interrupts */
3319 rtl8169_irq_mask_and_ack(ioaddr); 3382 rtl8169_irq_mask_and_ack(ioaddr);
3320 3383
3321 if (tp->mac_version == RTL_GIGA_MAC_VER_28) { 3384 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3385 tp->mac_version == RTL_GIGA_MAC_VER_28) {
3322 while (RTL_R8(TxPoll) & NPQ) 3386 while (RTL_R8(TxPoll) & NPQ)
3323 udelay(20); 3387 udelay(20);
3324 3388
@@ -3847,8 +3911,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
3847 Cxpl_dbg_sel | \ 3911 Cxpl_dbg_sel | \
3848 ASF | \ 3912 ASF | \
3849 PktCntrDisable | \ 3913 PktCntrDisable | \
3850 PCIDAC | \ 3914 Mac_dbgo_sel)
3851 PCIMulRW)
3852 3915
3853static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) 3916static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3854{ 3917{
@@ -3878,8 +3941,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3878 if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) 3941 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3879 RTL_W8(Config1, cfg1 & ~LEDS0); 3942 RTL_W8(Config1, cfg1 & ~LEDS0);
3880 3943
3881 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3882
3883 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 3944 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
3884} 3945}
3885 3946
@@ -3891,8 +3952,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3891 3952
3892 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); 3953 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
3893 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 3954 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3894
3895 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3896} 3955}
3897 3956
3898static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) 3957static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3902,6 +3961,37 @@ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
3902 rtl_ephy_write(ioaddr, 0x03, 0xc2f9); 3961 rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
3903} 3962}
3904 3963
3964static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3965{
3966 static const struct ephy_info e_info_8105e_1[] = {
3967 { 0x07, 0, 0x4000 },
3968 { 0x19, 0, 0x0200 },
3969 { 0x19, 0, 0x0020 },
3970 { 0x1e, 0, 0x2000 },
3971 { 0x03, 0, 0x0001 },
3972 { 0x19, 0, 0x0100 },
3973 { 0x19, 0, 0x0004 },
3974 { 0x0a, 0, 0x0020 }
3975 };
3976
3977 /* Force LAN exit from ASPM if Rx/Tx are not idel */
3978 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
3979
3980 /* disable Early Tally Counter */
3981 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
3982
3983 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
3984 RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH);
3985
3986 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
3987}
3988
3989static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3990{
3991 rtl_hw_start_8105e_1(ioaddr, pdev);
3992 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
3993}
3994
3905static void rtl_hw_start_8101(struct net_device *dev) 3995static void rtl_hw_start_8101(struct net_device *dev)
3906{ 3996{
3907 struct rtl8169_private *tp = netdev_priv(dev); 3997 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3918,6 +4008,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
3918 } 4008 }
3919 } 4009 }
3920 4010
4011 RTL_W8(Cfg9346, Cfg9346_Unlock);
4012
3921 switch (tp->mac_version) { 4013 switch (tp->mac_version) {
3922 case RTL_GIGA_MAC_VER_07: 4014 case RTL_GIGA_MAC_VER_07:
3923 rtl_hw_start_8102e_1(ioaddr, pdev); 4015 rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3930,16 +4022,22 @@ static void rtl_hw_start_8101(struct net_device *dev)
3930 case RTL_GIGA_MAC_VER_09: 4022 case RTL_GIGA_MAC_VER_09:
3931 rtl_hw_start_8102e_2(ioaddr, pdev); 4023 rtl_hw_start_8102e_2(ioaddr, pdev);
3932 break; 4024 break;
4025
4026 case RTL_GIGA_MAC_VER_29:
4027 rtl_hw_start_8105e_1(ioaddr, pdev);
4028 break;
4029 case RTL_GIGA_MAC_VER_30:
4030 rtl_hw_start_8105e_2(ioaddr, pdev);
4031 break;
3933 } 4032 }
3934 4033
3935 RTL_W8(Cfg9346, Cfg9346_Unlock); 4034 RTL_W8(Cfg9346, Cfg9346_Lock);
3936 4035
3937 RTL_W8(MaxTxPacketSize, TxPacketMax); 4036 RTL_W8(MaxTxPacketSize, TxPacketMax);
3938 4037
3939 rtl_set_rx_max_size(ioaddr, rx_buf_sz); 4038 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3940 4039
3941 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 4040 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
3942
3943 RTL_W16(CPlusCmd, tp->cp_cmd); 4041 RTL_W16(CPlusCmd, tp->cp_cmd);
3944 4042
3945 RTL_W16(IntrMitigate, 0x0000); 4043 RTL_W16(IntrMitigate, 0x0000);
@@ -3949,14 +4047,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
3949 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 4047 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3950 rtl_set_rx_tx_config_registers(tp); 4048 rtl_set_rx_tx_config_registers(tp);
3951 4049
3952 RTL_W8(Cfg9346, Cfg9346_Lock);
3953
3954 RTL_R8(IntrMask); 4050 RTL_R8(IntrMask);
3955 4051
3956 rtl_set_rx_mode(dev); 4052 rtl_set_rx_mode(dev);
3957 4053
3958 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3959
3960 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); 4054 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
3961 4055
3962 RTL_W16(IntrMask, tp->intr_event); 4056 RTL_W16(IntrMask, tp->intr_event);
@@ -4593,12 +4687,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4593 skb_put(skb, pkt_size); 4687 skb_put(skb, pkt_size);
4594 skb->protocol = eth_type_trans(skb, dev); 4688 skb->protocol = eth_type_trans(skb, dev);
4595 4689
4596 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) { 4690 rtl8169_rx_vlan_tag(desc, skb);
4597 if (likely(polling)) 4691
4598 napi_gro_receive(&tp->napi, skb); 4692 if (likely(polling))
4599 else 4693 napi_gro_receive(&tp->napi, skb);
4600 netif_rx(skb); 4694 else
4601 } 4695 netif_rx(skb);
4602 4696
4603 dev->stats.rx_bytes += pkt_size; 4697 dev->stats.rx_bytes += pkt_size;
4604 dev->stats.rx_packets++; 4698 dev->stats.rx_packets++;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 39c17cecb8b9..2ad6364103ea 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -7556,7 +7556,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7556 */ 7556 */
7557 skb->ip_summed = CHECKSUM_UNNECESSARY; 7557 skb->ip_summed = CHECKSUM_UNNECESSARY;
7558 if (ring_data->lro) { 7558 if (ring_data->lro) {
7559 u32 tcp_len; 7559 u32 tcp_len = 0;
7560 u8 *tcp; 7560 u8 *tcp;
7561 int ret = 0; 7561 int ret = 0;
7562 7562
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 158d5b5630b6..807178ef65ad 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -588,9 +588,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
588 struct ethtool_test *test, u64 *data) 588 struct ethtool_test *test, u64 *data)
589{ 589{
590 struct efx_nic *efx = netdev_priv(net_dev); 590 struct efx_nic *efx = netdev_priv(net_dev);
591 struct efx_self_tests efx_tests; 591 struct efx_self_tests *efx_tests;
592 int already_up; 592 int already_up;
593 int rc; 593 int rc = -ENOMEM;
594
595 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
596 if (!efx_tests)
597 goto fail;
598
594 599
595 ASSERT_RTNL(); 600 ASSERT_RTNL();
596 if (efx->state != STATE_RUNNING) { 601 if (efx->state != STATE_RUNNING) {
@@ -608,13 +613,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
608 if (rc) { 613 if (rc) {
609 netif_err(efx, drv, efx->net_dev, 614 netif_err(efx, drv, efx->net_dev,
610 "failed opening device.\n"); 615 "failed opening device.\n");
611 goto fail2; 616 goto fail1;
612 } 617 }
613 } 618 }
614 619
615 memset(&efx_tests, 0, sizeof(efx_tests)); 620 rc = efx_selftest(efx, efx_tests, test->flags);
616
617 rc = efx_selftest(efx, &efx_tests, test->flags);
618 621
619 if (!already_up) 622 if (!already_up)
620 dev_close(efx->net_dev); 623 dev_close(efx->net_dev);
@@ -623,10 +626,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
623 rc == 0 ? "passed" : "failed", 626 rc == 0 ? "passed" : "failed",
624 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 627 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
625 628
626 fail2: 629fail1:
627 fail1:
628 /* Fill ethtool results structures */ 630 /* Fill ethtool results structures */
629 efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); 631 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
632 kfree(efx_tests);
633fail:
630 if (rc) 634 if (rc)
631 test->flags |= ETH_TEST_FL_FAILED; 635 test->flags |= ETH_TEST_FL_FAILED;
632} 636}
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 640e368ebeee..84d4167eee9a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
495 sis_priv->mii_info.reg_num_mask = 0x1f; 495 sis_priv->mii_info.reg_num_mask = 0x1f;
496 496
497 /* Get Mac address according to the chip revision */ 497 /* Get Mac address according to the chip revision */
498 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev)); 498 sis_priv->chipset_rev = pci_dev->revision;
499 if(netif_msg_probe(sis_priv)) 499 if(netif_msg_probe(sis_priv))
500 printk(KERN_DEBUG "%s: detected revision %2.2x, " 500 printk(KERN_DEBUG "%s: detected revision %2.2x, "
501 "trying to get MAC address...\n", 501 "trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
532 /* save our host bridge revision */ 532 /* save our host bridge revision */
533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL); 533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
534 if (dev) { 534 if (dev) {
535 pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev); 535 sis_priv->host_bridge_rev = dev->revision;
536 pci_dev_put(dev); 536 pci_dev_put(dev);
537 } 537 }
538 538
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98ba736..35b28f42d208 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3858 3858
3859 /* device is off until link detection */
3860 netif_carrier_off(dev);
3861
3862 return dev; 3859 return dev;
3863} 3860}
3864 3861
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index e48a80885343..ace6404e2fac 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -25,153 +25,10 @@
25 * Microchip Technology, 24C01A/02A/04A Data Sheet 25 * Microchip Technology, 24C01A/02A/04A Data Sheet
26 * available in PDF format from www.microchip.com 26 * available in PDF format from www.microchip.com
27 * 27 *
28 * Change History
29 *
30 * Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses
31 * new PCI BIOS interface.
32 * Alan Cox <alan@lxorguk.ukuu.org.uk>:
33 * Fixed the out of memory
34 * handling.
35 *
36 * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
37 *
38 * v1.1 Dec 20, 1999 - Removed linux version checking
39 * Patch from Tigran Aivazian.
40 * - v1.1 includes Alan's SMP updates.
41 * - We still have problems on SMP though,
42 * but I'm looking into that.
43 *
44 * v1.2 Jan 02, 2000 - Hopefully fixed the SMP deadlock.
45 * - Removed dependency of HZ being 100.
46 * - We now allow higher priority timers to
47 * overwrite timers like TLAN_TIMER_ACTIVITY
48 * Patch from John Cagle <john.cagle@compaq.com>.
49 * - Fixed a few compiler warnings.
50 *
51 * v1.3 Feb 04, 2000 - Fixed the remaining HZ issues.
52 * - Removed call to pci_present().
53 * - Removed SA_INTERRUPT flag from irq handler.
54 * - Added __init and __initdata to reduce resisdent
55 * code size.
56 * - Driver now uses module_init/module_exit.
57 * - Rewrote init_module and tlan_probe to
58 * share a lot more code. We now use tlan_probe
59 * with builtin and module driver.
60 * - Driver ported to new net API.
61 * - tlan.txt has been reworked to reflect current
62 * driver (almost)
63 * - Other minor stuff
64 *
65 * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
66 * network cleanup in 2.3.43pre7 (Tigran & myself)
67 * - Minor stuff.
68 *
69 * v1.5 March 22, 2000 - Fixed another timer bug that would hang the
70 * driver if no cable/link were present.
71 * - Cosmetic changes.
72 * - TODO: Port completely to new PCI/DMA API
73 * Auto-Neg fallback.
74 *
75 * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters.
76 * Haven't tested it though, as the kernel support
77 * is currently broken (2.3.99p4p3).
78 * - Updated tlan.txt accordingly.
79 * - Adjusted minimum/maximum frame length.
80 * - There is now a TLAN website up at
81 * http://hp.sourceforge.net/
82 *
83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
84 * reports PHY information when used with Donald
85 * Beckers userspace MII diagnostics utility.
86 *
87 * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
88 * - Added link information to Auto-Neg and forced
89 * modes. When NIC operates with auto-neg the driver
90 * will report Link speed & duplex modes as well as
91 * link partner abilities. When forced link is used,
92 * the driver will report status of the established
93 * link.
94 * Please read tlan.txt for additional information.
95 * - Removed call to check_region(), and used
96 * return value of request_region() instead.
97 *
98 * v1.8a May 28, 2000 - Minor updates.
99 *
100 * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
101 * - Updated with timer fixes from Andrew Morton.
102 * - Fixed module race in TLan_Open.
103 * - Added routine to monitor PHY status.
104 * - Added activity led support for Proliant devices.
105 *
106 * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
107 * like the Compaq NetFlex3/E.
108 * - Rewrote tlan_probe to better handle multiple
109 * bus probes. Probing and device setup is now
110 * done through TLan_Probe and TLan_init_one. Actual
111 * hardware probe is done with kernel API and
112 * TLan_EisaProbe.
113 * - Adjusted debug information for probing.
114 * - Fixed bug that would cause general debug
115 * information to be printed after driver removal.
116 * - Added transmit timeout handling.
117 * - Fixed OOM return values in tlan_probe.
118 * - Fixed possible mem leak in tlan_exit
119 * (now tlan_remove_one).
120 * - Fixed timer bug in TLan_phyMonitor.
121 * - This driver version is alpha quality, please
122 * send me any bug issues you may encounter.
123 *
124 * v1.11 Aug 31, 2000 - Do not try to register irq 0 if no irq line was
125 * set for EISA cards.
126 * - Added support for NetFlex3/E with nibble-rate
127 * 10Base-T PHY. This is untestet as I haven't got
128 * one of these cards.
129 * - Fixed timer being added twice.
130 * - Disabled PhyMonitoring by default as this is
131 * work in progress. Define MONITOR to enable it.
132 * - Now we don't display link info with PHYs that
133 * doesn't support it (level1).
134 * - Incresed tx_timeout beacuse of auto-neg.
135 * - Adjusted timers for forced speeds.
136 *
137 * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
138 *
139 * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
140 * when link can't be established.
141 * - Added the bbuf option as a kernel parameter.
142 * - Fixed ioaddr probe bug.
143 * - Fixed stupid deadlock with MII interrupts.
144 * - Added support for speed/duplex selection with
145 * multiple nics.
146 * - Added partly fix for TX Channel lockup with
147 * TLAN v1.0 silicon. This needs to be investigated
148 * further.
149 *
150 * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
151 * interrupt. Thanks goes to
152 * Adam Keys <adam@ti.com>
153 * Denis Beaudoin <dbeaudoin@ti.com>
154 * for providing the patch.
155 * - Fixed auto-neg output when using multiple
156 * adapters.
157 * - Converted to use new taskq interface.
158 *
159 * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
160 *
161 * Samuel Chessman <chessman@tux.org> New Maintainer!
162 *
163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
164 * 10T half duplex no loopback
165 * Thanks to Gunnar Eikman
166 *
167 * Sakari Ailus <sakari.ailus@iki.fi>:
168 *
169 * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
170 * v1.16 Jan 6 2011 - Make checkpatch.pl happy.
171 * v1.17 Jan 6 2011 - Add suspend/resume support.
172 *
173 ******************************************************************************/ 28 ******************************************************************************/
174 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
175#include <linux/module.h> 32#include <linux/module.h>
176#include <linux/init.h> 33#include <linux/init.h>
177#include <linux/ioport.h> 34#include <linux/ioport.h>
@@ -204,7 +61,7 @@ module_param_array(speed, int, NULL, 0);
204MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); 61MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
205MODULE_PARM_DESC(duplex, 62MODULE_PARM_DESC(duplex,
206 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); 63 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
207MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); 64MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
208 65
209MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); 66MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
210MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); 67MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
@@ -542,7 +399,7 @@ static int __init tlan_probe(void)
542{ 399{
543 int rc = -ENODEV; 400 int rc = -ENODEV;
544 401
545 printk(KERN_INFO "%s", tlan_banner); 402 pr_info("%s", tlan_banner);
546 403
547 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 404 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
548 405
@@ -551,16 +408,16 @@ static int __init tlan_probe(void)
551 rc = pci_register_driver(&tlan_driver); 408 rc = pci_register_driver(&tlan_driver);
552 409
553 if (rc != 0) { 410 if (rc != 0) {
554 printk(KERN_ERR "TLAN: Could not register pci driver.\n"); 411 pr_err("Could not register pci driver\n");
555 goto err_out_pci_free; 412 goto err_out_pci_free;
556 } 413 }
557 414
558 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); 415 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
559 tlan_eisa_probe(); 416 tlan_eisa_probe();
560 417
561 printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n", 418 pr_info("%d device%s installed, PCI: %d EISA: %d\n",
562 tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s", 419 tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
563 tlan_have_pci, tlan_have_eisa); 420 tlan_have_pci, tlan_have_eisa);
564 421
565 if (tlan_devices_installed == 0) { 422 if (tlan_devices_installed == 0) {
566 rc = -ENODEV; 423 rc = -ENODEV;
@@ -619,7 +476,7 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
619 476
620 rc = pci_request_regions(pdev, tlan_signature); 477 rc = pci_request_regions(pdev, tlan_signature);
621 if (rc) { 478 if (rc) {
622 printk(KERN_ERR "TLAN: Could not reserve IO regions\n"); 479 pr_err("Could not reserve IO regions\n");
623 goto err_out; 480 goto err_out;
624 } 481 }
625 } 482 }
@@ -627,7 +484,7 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
627 484
628 dev = alloc_etherdev(sizeof(struct tlan_priv)); 485 dev = alloc_etherdev(sizeof(struct tlan_priv));
629 if (dev == NULL) { 486 if (dev == NULL) {
630 printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); 487 pr_err("Could not allocate memory for device\n");
631 rc = -ENOMEM; 488 rc = -ENOMEM;
632 goto err_out_regions; 489 goto err_out_regions;
633 } 490 }
@@ -646,8 +503,7 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
646 503
647 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 504 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
648 if (rc) { 505 if (rc) {
649 printk(KERN_ERR 506 pr_err("No suitable PCI mapping available\n");
650 "TLAN: No suitable PCI mapping available.\n");
651 goto err_out_free_dev; 507 goto err_out_free_dev;
652 } 508 }
653 509
@@ -661,7 +517,7 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
661 } 517 }
662 } 518 }
663 if (!pci_io_base) { 519 if (!pci_io_base) {
664 printk(KERN_ERR "TLAN: No IO mappings available\n"); 520 pr_err("No IO mappings available\n");
665 rc = -EIO; 521 rc = -EIO;
666 goto err_out_free_dev; 522 goto err_out_free_dev;
667 } 523 }
@@ -717,13 +573,13 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
717 573
718 rc = tlan_init(dev); 574 rc = tlan_init(dev);
719 if (rc) { 575 if (rc) {
720 printk(KERN_ERR "TLAN: Could not set up device.\n"); 576 pr_err("Could not set up device\n");
721 goto err_out_free_dev; 577 goto err_out_free_dev;
722 } 578 }
723 579
724 rc = register_netdev(dev); 580 rc = register_netdev(dev);
725 if (rc) { 581 if (rc) {
726 printk(KERN_ERR "TLAN: Could not register device.\n"); 582 pr_err("Could not register device\n");
727 goto err_out_uninit; 583 goto err_out_uninit;
728 } 584 }
729 585
@@ -740,12 +596,11 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
740 tlan_have_eisa++; 596 tlan_have_eisa++;
741 } 597 }
742 598
743 printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n", 599 netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
744 dev->name, 600 (int)dev->irq,
745 (int) dev->irq, 601 (int)dev->base_addr,
746 (int) dev->base_addr, 602 priv->adapter->device_label,
747 priv->adapter->device_label, 603 priv->adapter_rev);
748 priv->adapter_rev);
749 return 0; 604 return 0;
750 605
751err_out_uninit: 606err_out_uninit:
@@ -861,7 +716,7 @@ static void __init tlan_eisa_probe(void)
861 } 716 }
862 717
863 if (debug == 0x10) 718 if (debug == 0x10)
864 printk(KERN_INFO "Found one\n"); 719 pr_info("Found one\n");
865 720
866 721
867 /* Get irq from board */ 722 /* Get irq from board */
@@ -890,12 +745,12 @@ static void __init tlan_eisa_probe(void)
890 745
891out: 746out:
892 if (debug == 0x10) 747 if (debug == 0x10)
893 printk(KERN_INFO "None found\n"); 748 pr_info("None found\n");
894 continue; 749 continue;
895 750
896out2: 751out2:
897 if (debug == 0x10) 752 if (debug == 0x10)
898 printk(KERN_INFO "Card found but it is not enabled, skipping\n"); 753 pr_info("Card found but it is not enabled, skipping\n");
899 continue; 754 continue;
900 755
901 } 756 }
@@ -963,8 +818,7 @@ static int tlan_init(struct net_device *dev)
963 priv->dma_size = dma_size; 818 priv->dma_size = dma_size;
964 819
965 if (priv->dma_storage == NULL) { 820 if (priv->dma_storage == NULL) {
966 printk(KERN_ERR 821 pr_err("Could not allocate lists and buffers for %s\n",
967 "TLAN: Could not allocate lists and buffers for %s.\n",
968 dev->name); 822 dev->name);
969 return -ENOMEM; 823 return -ENOMEM;
970 } 824 }
@@ -982,9 +836,8 @@ static int tlan_init(struct net_device *dev)
982 (u8) priv->adapter->addr_ofs + i, 836 (u8) priv->adapter->addr_ofs + i,
983 (u8 *) &dev->dev_addr[i]); 837 (u8 *) &dev->dev_addr[i]);
984 if (err) { 838 if (err) {
985 printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n", 839 pr_err("%s: Error reading MAC from eeprom: %d\n",
986 dev->name, 840 dev->name, err);
987 err);
988 } 841 }
989 dev->addr_len = 6; 842 dev->addr_len = 6;
990 843
@@ -1028,8 +881,8 @@ static int tlan_open(struct net_device *dev)
1028 dev->name, dev); 881 dev->name, dev);
1029 882
1030 if (err) { 883 if (err) {
1031 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n", 884 netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
1032 dev->name, dev->irq); 885 dev->irq);
1033 return err; 886 return err;
1034 } 887 }
1035 888
@@ -1512,8 +1365,8 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
1512 } 1365 }
1513 1366
1514 if (!ack) 1367 if (!ack)
1515 printk(KERN_INFO 1368 netdev_info(dev,
1516 "TLAN: Received interrupt for uncompleted TX frame.\n"); 1369 "Received interrupt for uncompleted TX frame\n");
1517 1370
1518 if (eoc) { 1371 if (eoc) {
1519 TLAN_DBG(TLAN_DEBUG_TX, 1372 TLAN_DBG(TLAN_DEBUG_TX,
@@ -1667,8 +1520,8 @@ drop_and_reuse:
1667 } 1520 }
1668 1521
1669 if (!ack) 1522 if (!ack)
1670 printk(KERN_INFO 1523 netdev_info(dev,
1671 "TLAN: Received interrupt for uncompleted RX frame.\n"); 1524 "Received interrupt for uncompleted RX frame\n");
1672 1525
1673 1526
1674 if (eoc) { 1527 if (eoc) {
@@ -1724,7 +1577,7 @@ drop_and_reuse:
1724 1577
1725static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int) 1578static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
1726{ 1579{
1727 pr_info("TLAN: Test interrupt on %s.\n", dev->name); 1580 netdev_info(dev, "Test interrupt\n");
1728 return 1; 1581 return 1;
1729 1582
1730} 1583}
@@ -1818,7 +1671,7 @@ static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
1818 if (host_int & TLAN_HI_IV_MASK) { 1671 if (host_int & TLAN_HI_IV_MASK) {
1819 netif_stop_queue(dev); 1672 netif_stop_queue(dev);
1820 error = inl(dev->base_addr + TLAN_CH_PARM); 1673 error = inl(dev->base_addr + TLAN_CH_PARM);
1821 pr_info("TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error); 1674 netdev_info(dev, "Adaptor Error = 0x%x\n", error);
1822 tlan_read_and_clear_stats(dev, TLAN_RECORD); 1675 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1823 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); 1676 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
1824 1677
@@ -2059,7 +1912,7 @@ static void tlan_reset_lists(struct net_device *dev)
2059 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1912 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
2060 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); 1913 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
2061 if (!skb) { 1914 if (!skb) {
2062 pr_err("TLAN: out of memory for received data.\n"); 1915 netdev_err(dev, "Out of memory for received data\n");
2063 break; 1916 break;
2064 } 1917 }
2065 1918
@@ -2143,13 +1996,13 @@ static void tlan_print_dio(u16 io_base)
2143 u32 data0, data1; 1996 u32 data0, data1;
2144 int i; 1997 int i;
2145 1998
2146 pr_info("TLAN: Contents of internal registers for io base 0x%04hx.\n", 1999 pr_info("Contents of internal registers for io base 0x%04hx\n",
2147 io_base); 2000 io_base);
2148 pr_info("TLAN: Off. +0 +4\n"); 2001 pr_info("Off. +0 +4\n");
2149 for (i = 0; i < 0x4C; i += 8) { 2002 for (i = 0; i < 0x4C; i += 8) {
2150 data0 = tlan_dio_read32(io_base, i); 2003 data0 = tlan_dio_read32(io_base, i);
2151 data1 = tlan_dio_read32(io_base, i + 0x4); 2004 data1 = tlan_dio_read32(io_base, i + 0x4);
2152 pr_info("TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1); 2005 pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
2153 } 2006 }
2154 2007
2155} 2008}
@@ -2178,14 +2031,14 @@ static void tlan_print_list(struct tlan_list *list, char *type, int num)
2178{ 2031{
2179 int i; 2032 int i;
2180 2033
2181 pr_info("TLAN: %s List %d at %p\n", type, num, list); 2034 pr_info("%s List %d at %p\n", type, num, list);
2182 pr_info("TLAN: Forward = 0x%08x\n", list->forward); 2035 pr_info(" Forward = 0x%08x\n", list->forward);
2183 pr_info("TLAN: CSTAT = 0x%04hx\n", list->c_stat); 2036 pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
2184 pr_info("TLAN: Frame Size = 0x%04hx\n", list->frame_size); 2037 pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
2185 /* for (i = 0; i < 10; i++) { */ 2038 /* for (i = 0; i < 10; i++) { */
2186 for (i = 0; i < 2; i++) { 2039 for (i = 0; i < 2; i++) {
2187 pr_info("TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", 2040 pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
2188 i, list->buffer[i].count, list->buffer[i].address); 2041 i, list->buffer[i].count, list->buffer[i].address);
2189 } 2042 }
2190 2043
2191} 2044}
@@ -2400,7 +2253,7 @@ tlan_finish_reset(struct net_device *dev)
2400 if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) || 2253 if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
2401 (priv->aui)) { 2254 (priv->aui)) {
2402 status = MII_GS_LINK; 2255 status = MII_GS_LINK;
2403 pr_info("TLAN: %s: Link forced.\n", dev->name); 2256 netdev_info(dev, "Link forced\n");
2404 } else { 2257 } else {
2405 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); 2258 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2406 udelay(1000); 2259 udelay(1000);
@@ -2412,24 +2265,21 @@ tlan_finish_reset(struct net_device *dev)
2412 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner); 2265 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
2413 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par); 2266 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
2414 2267
2415 pr_info("TLAN: %s: Link active with ", dev->name); 2268 netdev_info(dev,
2416 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { 2269 "Link active with %s %uMbps %s-Duplex\n",
2417 pr_info("forced 10%sMbps %s-Duplex\n", 2270 !(tlphy_par & TLAN_PHY_AN_EN_STAT)
2418 tlphy_par & TLAN_PHY_SPEED_100 2271 ? "forced" : "Autonegotiation enabled,",
2419 ? "" : "0", 2272 tlphy_par & TLAN_PHY_SPEED_100
2420 tlphy_par & TLAN_PHY_DUPLEX_FULL 2273 ? 100 : 10,
2421 ? "Full" : "Half"); 2274 tlphy_par & TLAN_PHY_DUPLEX_FULL
2422 } else { 2275 ? "Full" : "Half");
2423 pr_info("Autonegotiation enabled, at 10%sMbps %s-Duplex\n", 2276
2424 tlphy_par & TLAN_PHY_SPEED_100 2277 if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
2425 ? "" : "0", 2278 netdev_info(dev, "Partner capability:");
2426 tlphy_par & TLAN_PHY_DUPLEX_FULL 2279 for (i = 5; i < 10; i++)
2427 ? "Full" : "half"); 2280 if (partner & (1 << i))
2428 pr_info("TLAN: Partner capability: "); 2281 pr_cont(" %s", media[i-5]);
2429 for (i = 5; i <= 10; i++) 2282 pr_cont("\n");
2430 if (partner & (1<<i))
2431 printk("%s", media[i-5]);
2432 printk("\n");
2433 } 2283 }
2434 2284
2435 tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 2285 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
@@ -2441,7 +2291,7 @@ tlan_finish_reset(struct net_device *dev)
2441 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT); 2291 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
2442#endif 2292#endif
2443 } else if (status & MII_GS_LINK) { 2293 } else if (status & MII_GS_LINK) {
2444 pr_info("TLAN: %s: Link active\n", dev->name); 2294 netdev_info(dev, "Link active\n");
2445 tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 2295 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
2446 TLAN_LED_LINK); 2296 TLAN_LED_LINK);
2447 } 2297 }
@@ -2467,8 +2317,7 @@ tlan_finish_reset(struct net_device *dev)
2467 outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD); 2317 outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
2468 netif_carrier_on(dev); 2318 netif_carrier_on(dev);
2469 } else { 2319 } else {
2470 pr_info("TLAN: %s: Link inactive, will retry in 10 secs...\n", 2320 netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
2471 dev->name);
2472 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET); 2321 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
2473 return; 2322 return;
2474 } 2323 }
@@ -2552,23 +2401,20 @@ static void tlan_phy_print(struct net_device *dev)
2552 phy = priv->phy[priv->phy_num]; 2401 phy = priv->phy[priv->phy_num];
2553 2402
2554 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { 2403 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2555 pr_info("TLAN: Device %s, Unmanaged PHY.\n", dev->name); 2404 netdev_info(dev, "Unmanaged PHY\n");
2556 } else if (phy <= TLAN_PHY_MAX_ADDR) { 2405 } else if (phy <= TLAN_PHY_MAX_ADDR) {
2557 pr_info("TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy); 2406 netdev_info(dev, "PHY 0x%02x\n", phy);
2558 pr_info("TLAN: Off. +0 +1 +2 +3\n"); 2407 pr_info(" Off. +0 +1 +2 +3\n");
2559 for (i = 0; i < 0x20; i += 4) { 2408 for (i = 0; i < 0x20; i += 4) {
2560 pr_info("TLAN: 0x%02x", i);
2561 tlan_mii_read_reg(dev, phy, i, &data0); 2409 tlan_mii_read_reg(dev, phy, i, &data0);
2562 printk(" 0x%04hx", data0);
2563 tlan_mii_read_reg(dev, phy, i + 1, &data1); 2410 tlan_mii_read_reg(dev, phy, i + 1, &data1);
2564 printk(" 0x%04hx", data1);
2565 tlan_mii_read_reg(dev, phy, i + 2, &data2); 2411 tlan_mii_read_reg(dev, phy, i + 2, &data2);
2566 printk(" 0x%04hx", data2);
2567 tlan_mii_read_reg(dev, phy, i + 3, &data3); 2412 tlan_mii_read_reg(dev, phy, i + 3, &data3);
2568 printk(" 0x%04hx\n", data3); 2413 pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
2414 i, data0, data1, data2, data3);
2569 } 2415 }
2570 } else { 2416 } else {
2571 pr_info("TLAN: Device %s, Invalid PHY.\n", dev->name); 2417 netdev_info(dev, "Invalid PHY\n");
2572 } 2418 }
2573 2419
2574} 2420}
@@ -2635,7 +2481,7 @@ static void tlan_phy_detect(struct net_device *dev)
2635 else if (priv->phy[0] != TLAN_PHY_NONE) 2481 else if (priv->phy[0] != TLAN_PHY_NONE)
2636 priv->phy_num = 0; 2482 priv->phy_num = 0;
2637 else 2483 else
2638 pr_info("TLAN: Cannot initialize device, no PHY was found!\n"); 2484 netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
2639 2485
2640} 2486}
2641 2487
@@ -2763,8 +2609,7 @@ static void tlan_phy_start_link(struct net_device *dev)
2763 * but the card need additional time to start AN. 2609 * but the card need additional time to start AN.
2764 * .5 sec should be plenty extra. 2610 * .5 sec should be plenty extra.
2765 */ 2611 */
2766 pr_info("TLAN: %s: Starting autonegotiation.\n", 2612 netdev_info(dev, "Starting autonegotiation\n");
2767 dev->name);
2768 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN); 2613 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
2769 return; 2614 return;
2770 } 2615 }
@@ -2827,16 +2672,16 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
2827 * more time. Perhaps we should fail after a while. 2672 * more time. Perhaps we should fail after a while.
2828 */ 2673 */
2829 if (!priv->neg_be_verbose++) { 2674 if (!priv->neg_be_verbose++) {
2830 pr_info("TLAN: Giving autonegotiation more time.\n"); 2675 pr_info("Giving autonegotiation more time.\n");
2831 pr_info("TLAN: Please check that your adapter has\n"); 2676 pr_info("Please check that your adapter has\n");
2832 pr_info("TLAN: been properly connected to a HUB or Switch.\n"); 2677 pr_info("been properly connected to a HUB or Switch.\n");
2833 pr_info("TLAN: Trying to establish link in the background...\n"); 2678 pr_info("Trying to establish link in the background...\n");
2834 } 2679 }
2835 tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN); 2680 tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
2836 return; 2681 return;
2837 } 2682 }
2838 2683
2839 pr_info("TLAN: %s: Autonegotiation complete.\n", dev->name); 2684 netdev_info(dev, "Autonegotiation complete\n");
2840 tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv); 2685 tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
2841 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa); 2686 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
2842 mode = an_adv & an_lpa & 0x03E0; 2687 mode = an_adv & an_lpa & 0x03E0;
@@ -2861,11 +2706,11 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
2861 (an_adv & an_lpa & 0x0040)) { 2706 (an_adv & an_lpa & 0x0040)) {
2862 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 2707 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2863 MII_GC_AUTOENB | MII_GC_DUPLEX); 2708 MII_GC_AUTOENB | MII_GC_DUPLEX);
2864 pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n"); 2709 netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
2865 } else { 2710 } else {
2866 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 2711 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2867 MII_GC_AUTOENB); 2712 MII_GC_AUTOENB);
2868 pr_info("TLAN: Starting internal PHY with HALF-DUPLEX\n"); 2713 netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
2869 } 2714 }
2870 } 2715 }
2871 2716
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 55786a0efc41..f5e9ac00a07b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -34,6 +34,8 @@
34 * Modifications for 2.3.99-pre5 kernel. 34 * Modifications for 2.3.99-pre5 kernel.
35 */ 35 */
36 36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
37#define DRV_NAME "tun" 39#define DRV_NAME "tun"
38#define DRV_VERSION "1.6" 40#define DRV_VERSION "1.6"
39#define DRV_DESCRIPTION "Universal TUN/TAP device driver" 41#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
@@ -76,11 +78,27 @@
76#ifdef TUN_DEBUG 78#ifdef TUN_DEBUG
77static int debug; 79static int debug;
78 80
79#define DBG if(tun->debug)printk 81#define tun_debug(level, tun, fmt, args...) \
80#define DBG1 if(debug==2)printk 82do { \
83 if (tun->debug) \
84 netdev_printk(level, tun->dev, fmt, ##args); \
85} while (0)
86#define DBG1(level, fmt, args...) \
87do { \
88 if (debug == 2) \
89 printk(level fmt, ##args); \
90} while (0)
81#else 91#else
82#define DBG( a... ) 92#define tun_debug(level, tun, fmt, args...) \
83#define DBG1( a... ) 93do { \
94 if (0) \
95 netdev_printk(level, tun->dev, fmt, ##args); \
96} while (0)
97#define DBG1(level, fmt, args...) \
98do { \
99 if (0) \
100 printk(level fmt, ##args); \
101} while (0)
84#endif 102#endif
85 103
86#define FLT_EXACT_COUNT 8 104#define FLT_EXACT_COUNT 8
@@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun)
205 tun_detach(tfile->tun); 223 tun_detach(tfile->tun);
206} 224}
207 225
208/* TAP filterting */ 226/* TAP filtering */
209static void addr_hash_set(u32 *mask, const u8 *addr) 227static void addr_hash_set(u32 *mask, const u8 *addr)
210{ 228{
211 int n = ether_crc(ETH_ALEN, addr) >> 26; 229 int n = ether_crc(ETH_ALEN, addr) >> 26;
@@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
360{ 378{
361 struct tun_struct *tun = netdev_priv(dev); 379 struct tun_struct *tun = netdev_priv(dev);
362 380
363 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); 381 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
364 382
365 /* Drop packet if interface is not attached */ 383 /* Drop packet if interface is not attached */
366 if (!tun->tfile) 384 if (!tun->tfile)
@@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
499 517
500 sk = tun->socket.sk; 518 sk = tun->socket.sk;
501 519
502 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 520 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
503 521
504 poll_wait(file, &tun->wq.wait, wait); 522 poll_wait(file, &tun->wq.wait, wait);
505 523
@@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
690 if (!tun) 708 if (!tun)
691 return -EBADFD; 709 return -EBADFD;
692 710
693 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); 711 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
694 712
695 result = tun_get_user(tun, iv, iov_length(iv, count), 713 result = tun_get_user(tun, iv, iov_length(iv, count),
696 file->f_flags & O_NONBLOCK); 714 file->f_flags & O_NONBLOCK);
@@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
739 else if (sinfo->gso_type & SKB_GSO_UDP) 757 else if (sinfo->gso_type & SKB_GSO_UDP)
740 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; 758 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
741 else { 759 else {
742 printk(KERN_ERR "tun: unexpected GSO type: " 760 pr_err("unexpected GSO type: "
743 "0x%x, gso_size %d, hdr_len %d\n", 761 "0x%x, gso_size %d, hdr_len %d\n",
744 sinfo->gso_type, gso.gso_size, 762 sinfo->gso_type, gso.gso_size,
745 gso.hdr_len); 763 gso.hdr_len);
@@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
786 struct sk_buff *skb; 804 struct sk_buff *skb;
787 ssize_t ret = 0; 805 ssize_t ret = 0;
788 806
789 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 807 tun_debug(KERN_INFO, tun, "tun_chr_read\n");
790 808
791 add_wait_queue(&tun->wq.wait, &wait); 809 add_wait_queue(&tun->wq.wait, &wait);
792 while (len) { 810 while (len) {
@@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1083 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1101 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1084 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1102 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1085 device_create_file(&tun->dev->dev, &dev_attr_group)) 1103 device_create_file(&tun->dev->dev, &dev_attr_group))
1086 printk(KERN_ERR "Failed to create tun sysfs files\n"); 1104 pr_err("Failed to create tun sysfs files\n");
1087 1105
1088 sk->sk_destruct = tun_sock_destruct; 1106 sk->sk_destruct = tun_sock_destruct;
1089 1107
@@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1092 goto failed; 1110 goto failed;
1093 } 1111 }
1094 1112
1095 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); 1113 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1096 1114
1097 if (ifr->ifr_flags & IFF_NO_PI) 1115 if (ifr->ifr_flags & IFF_NO_PI)
1098 tun->flags |= TUN_NO_PI; 1116 tun->flags |= TUN_NO_PI;
@@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1129static int tun_get_iff(struct net *net, struct tun_struct *tun, 1147static int tun_get_iff(struct net *net, struct tun_struct *tun,
1130 struct ifreq *ifr) 1148 struct ifreq *ifr)
1131{ 1149{
1132 DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); 1150 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1133 1151
1134 strcpy(ifr->ifr_name, tun->dev->name); 1152 strcpy(ifr->ifr_name, tun->dev->name);
1135 1153
@@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1229 if (!tun) 1247 if (!tun)
1230 goto unlock; 1248 goto unlock;
1231 1249
1232 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 1250 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
1233 1251
1234 ret = 0; 1252 ret = 0;
1235 switch (cmd) { 1253 switch (cmd) {
@@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1249 else 1267 else
1250 tun->flags &= ~TUN_NOCHECKSUM; 1268 tun->flags &= ~TUN_NOCHECKSUM;
1251 1269
1252 DBG(KERN_INFO "%s: checksum %s\n", 1270 tun_debug(KERN_INFO, tun, "checksum %s\n",
1253 tun->dev->name, arg ? "disabled" : "enabled"); 1271 arg ? "disabled" : "enabled");
1254 break; 1272 break;
1255 1273
1256 case TUNSETPERSIST: 1274 case TUNSETPERSIST:
@@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1260 else 1278 else
1261 tun->flags &= ~TUN_PERSIST; 1279 tun->flags &= ~TUN_PERSIST;
1262 1280
1263 DBG(KERN_INFO "%s: persist %s\n", 1281 tun_debug(KERN_INFO, tun, "persist %s\n",
1264 tun->dev->name, arg ? "enabled" : "disabled"); 1282 arg ? "enabled" : "disabled");
1265 break; 1283 break;
1266 1284
1267 case TUNSETOWNER: 1285 case TUNSETOWNER:
1268 /* Set owner of the device */ 1286 /* Set owner of the device */
1269 tun->owner = (uid_t) arg; 1287 tun->owner = (uid_t) arg;
1270 1288
1271 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); 1289 tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
1272 break; 1290 break;
1273 1291
1274 case TUNSETGROUP: 1292 case TUNSETGROUP:
1275 /* Set group of the device */ 1293 /* Set group of the device */
1276 tun->group= (gid_t) arg; 1294 tun->group= (gid_t) arg;
1277 1295
1278 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); 1296 tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
1279 break; 1297 break;
1280 1298
1281 case TUNSETLINK: 1299 case TUNSETLINK:
1282 /* Only allow setting the type when the interface is down */ 1300 /* Only allow setting the type when the interface is down */
1283 if (tun->dev->flags & IFF_UP) { 1301 if (tun->dev->flags & IFF_UP) {
1284 DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", 1302 tun_debug(KERN_INFO, tun,
1285 tun->dev->name); 1303 "Linktype set failed because interface is up\n");
1286 ret = -EBUSY; 1304 ret = -EBUSY;
1287 } else { 1305 } else {
1288 tun->dev->type = (int) arg; 1306 tun->dev->type = (int) arg;
1289 DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); 1307 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1308 tun->dev->type);
1290 ret = 0; 1309 ret = 0;
1291 } 1310 }
1292 break; 1311 break;
@@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1318 1337
1319 case SIOCSIFHWADDR: 1338 case SIOCSIFHWADDR:
1320 /* Set hw address */ 1339 /* Set hw address */
1321 DBG(KERN_DEBUG "%s: set hw address: %pM\n", 1340 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1322 tun->dev->name, ifr.ifr_hwaddr.sa_data); 1341 ifr.ifr_hwaddr.sa_data);
1323 1342
1324 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1343 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1325 break; 1344 break;
@@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1433 if (!tun) 1452 if (!tun)
1434 return -EBADFD; 1453 return -EBADFD;
1435 1454
1436 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); 1455 tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
1437 1456
1438 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) 1457 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1439 goto out; 1458 goto out;
@@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1455{ 1474{
1456 struct tun_file *tfile; 1475 struct tun_file *tfile;
1457 1476
1458 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1477 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
1459 1478
1460 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 1479 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1461 if (!tfile) 1480 if (!tfile)
@@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1476 if (tun) { 1495 if (tun) {
1477 struct net_device *dev = tun->dev; 1496 struct net_device *dev = tun->dev;
1478 1497
1479 DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); 1498 tun_debug(KERN_INFO, tun, "tun_chr_close\n");
1480 1499
1481 __tun_detach(tun); 1500 __tun_detach(tun);
1482 1501
@@ -1607,18 +1626,18 @@ static int __init tun_init(void)
1607{ 1626{
1608 int ret = 0; 1627 int ret = 0;
1609 1628
1610 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 1629 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1611 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); 1630 pr_info("%s\n", DRV_COPYRIGHT);
1612 1631
1613 ret = rtnl_link_register(&tun_link_ops); 1632 ret = rtnl_link_register(&tun_link_ops);
1614 if (ret) { 1633 if (ret) {
1615 printk(KERN_ERR "tun: Can't register link_ops\n"); 1634 pr_err("Can't register link_ops\n");
1616 goto err_linkops; 1635 goto err_linkops;
1617 } 1636 }
1618 1637
1619 ret = misc_register(&tun_miscdev); 1638 ret = misc_register(&tun_miscdev);
1620 if (ret) { 1639 if (ret) {
1621 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); 1640 pr_err("Can't register misc device %d\n", TUN_MINOR);
1622 goto err_misc; 1641 goto err_misc;
1623 } 1642 }
1624 return 0; 1643 return 0;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e3b9fb..5002f5be47be 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
651 .driver_info = (unsigned long)&dm9601_info, 651 .driver_info = (unsigned long)&dm9601_info,
652 }, 652 },
653 { 653 {
654 USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
655 .driver_info = (unsigned long)&dm9601_info,
656 },
657 {
654 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ 658 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
655 .driver_info = (unsigned long)&dm9601_info, 659 .driver_info = (unsigned long)&dm9601_info,
656 }, 660 },
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index e40f619b62b1..395423aeec00 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -3387,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
3387#endif 3387#endif
3388}; 3388};
3389 3389
3390static int __devinit vxge_device_revision(struct vxgedev *vdev)
3391{
3392 int ret;
3393 u8 revision;
3394
3395 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3396 if (ret)
3397 return -EIO;
3398
3399 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3400 return 0;
3401}
3402
3403static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3390static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3404 struct vxge_config *config, 3391 struct vxge_config *config,
3405 int high_dma, int no_of_vpath, 3392 int high_dma, int no_of_vpath,
@@ -3439,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3439 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3426 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3440 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3427 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3441 vdev->rx_hwts = 0; 3428 vdev->rx_hwts = 0;
3442 3429 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3443 ret = vxge_device_revision(vdev);
3444 if (ret < 0)
3445 goto _out1;
3446 3430
3447 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3431 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3448 3432
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index b4338f389394..7aeb113cbb90 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -274,6 +274,7 @@ source "drivers/net/wireless/b43legacy/Kconfig"
274source "drivers/net/wireless/hostap/Kconfig" 274source "drivers/net/wireless/hostap/Kconfig"
275source "drivers/net/wireless/ipw2x00/Kconfig" 275source "drivers/net/wireless/ipw2x00/Kconfig"
276source "drivers/net/wireless/iwlwifi/Kconfig" 276source "drivers/net/wireless/iwlwifi/Kconfig"
277source "drivers/net/wireless/iwlegacy/Kconfig"
277source "drivers/net/wireless/iwmc3200wifi/Kconfig" 278source "drivers/net/wireless/iwmc3200wifi/Kconfig"
278source "drivers/net/wireless/libertas/Kconfig" 279source "drivers/net/wireless/libertas/Kconfig"
279source "drivers/net/wireless/orinoco/Kconfig" 280source "drivers/net/wireless/orinoco/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 9760561a27a5..ddd3fb6ba1d3 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_B43LEGACY) += b43legacy/
24obj-$(CONFIG_ZD1211RW) += zd1211rw/ 24obj-$(CONFIG_ZD1211RW) += zd1211rw/
25obj-$(CONFIG_RTL8180) += rtl818x/ 25obj-$(CONFIG_RTL8180) += rtl818x/
26obj-$(CONFIG_RTL8187) += rtl818x/ 26obj-$(CONFIG_RTL8187) += rtl818x/
27obj-$(CONFIG_RTL8192CE) += rtlwifi/ 27obj-$(CONFIG_RTLWIFI) += rtlwifi/
28 28
29# 16-bit wireless PCMCIA client drivers 29# 16-bit wireless PCMCIA client drivers
30obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o 30obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
@@ -41,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o
41 41
42obj-$(CONFIG_MWL8K) += mwl8k.o 42obj-$(CONFIG_MWL8K) += mwl8k.o
43 43
44obj-$(CONFIG_IWLWIFI) += iwlwifi/ 44obj-$(CONFIG_IWLAGN) += iwlwifi/
45obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
45obj-$(CONFIG_RT2X00) += rt2x00/ 46obj-$(CONFIG_RT2X00) += rt2x00/
46 47
47obj-$(CONFIG_P54_COMMON) += p54/ 48obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9aa1bc0a947..afe2cbc6cb24 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1658,7 +1658,7 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1658} 1658}
1659 1659
1660/* Put adm8211_tx_hdr on skb and transmit */ 1660/* Put adm8211_tx_hdr on skb and transmit */
1661static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 1661static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1662{ 1662{
1663 struct adm8211_tx_hdr *txhdr; 1663 struct adm8211_tx_hdr *txhdr;
1664 size_t payload_len, hdrlen; 1664 size_t payload_len, hdrlen;
@@ -1707,8 +1707,6 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1707 txhdr->retry_limit = info->control.rates[0].count; 1707 txhdr->retry_limit = info->control.rates[0].count;
1708 1708
1709 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); 1709 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
1710
1711 return NETDEV_TX_OK;
1712} 1710}
1713 1711
1714static int adm8211_alloc_rings(struct ieee80211_hw *dev) 1712static int adm8211_alloc_rings(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 1476314afa8a..298601436ee2 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1728,7 +1728,7 @@ static void at76_mac80211_tx_callback(struct urb *urb)
1728 ieee80211_wake_queues(priv->hw); 1728 ieee80211_wake_queues(priv->hw);
1729} 1729}
1730 1730
1731static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1731static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1732{ 1732{
1733 struct at76_priv *priv = hw->priv; 1733 struct at76_priv *priv = hw->priv;
1734 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; 1734 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
@@ -1741,7 +1741,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1741 if (priv->tx_urb->status == -EINPROGRESS) { 1741 if (priv->tx_urb->status == -EINPROGRESS) {
1742 wiphy_err(priv->hw->wiphy, 1742 wiphy_err(priv->hw->wiphy,
1743 "%s called while tx urb is pending\n", __func__); 1743 "%s called while tx urb is pending\n", __func__);
1744 return NETDEV_TX_BUSY; 1744 dev_kfree_skb_any(skb);
1745 return;
1745 } 1746 }
1746 1747
1747 /* The following code lines are important when the device is going to 1748 /* The following code lines are important when the device is going to
@@ -1755,7 +1756,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1755 if (compare_ether_addr(priv->bssid, mgmt->bssid)) { 1756 if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
1756 memcpy(priv->bssid, mgmt->bssid, ETH_ALEN); 1757 memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
1757 ieee80211_queue_work(hw, &priv->work_join_bssid); 1758 ieee80211_queue_work(hw, &priv->work_join_bssid);
1758 return NETDEV_TX_BUSY; 1759 dev_kfree_skb_any(skb);
1760 return;
1759 } 1761 }
1760 } 1762 }
1761 1763
@@ -1795,8 +1797,6 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1795 priv->tx_urb, 1797 priv->tx_urb,
1796 priv->tx_urb->hcpriv, priv->tx_urb->complete); 1798 priv->tx_urb->hcpriv, priv->tx_urb->complete);
1797 } 1799 }
1798
1799 return 0;
1800} 1800}
1801 1801
1802static int at76_mac80211_start(struct ieee80211_hw *hw) 1802static int at76_mac80211_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 4f845f80c098..371e4ce49528 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -224,7 +224,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
224int ar9170_nag_limiter(struct ar9170 *ar); 224int ar9170_nag_limiter(struct ar9170 *ar);
225 225
226/* MAC */ 226/* MAC */
227int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 227void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
228int ar9170_init_mac(struct ar9170 *ar); 228int ar9170_init_mac(struct ar9170 *ar);
229int ar9170_set_qos(struct ar9170 *ar); 229int ar9170_set_qos(struct ar9170 *ar);
230int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast); 230int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index a9111e1161fd..b761fec0d721 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1475,7 +1475,7 @@ static void ar9170_tx(struct ar9170 *ar)
1475 msecs_to_jiffies(AR9170_JANITOR_DELAY)); 1475 msecs_to_jiffies(AR9170_JANITOR_DELAY));
1476} 1476}
1477 1477
1478int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1478void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1479{ 1479{
1480 struct ar9170 *ar = hw->priv; 1480 struct ar9170 *ar = hw->priv;
1481 struct ieee80211_tx_info *info; 1481 struct ieee80211_tx_info *info;
@@ -1493,11 +1493,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1493 skb_queue_tail(&ar->tx_pending[queue], skb); 1493 skb_queue_tail(&ar->tx_pending[queue], skb);
1494 1494
1495 ar9170_tx(ar); 1495 ar9170_tx(ar);
1496 return NETDEV_TX_OK; 1496 return;
1497 1497
1498err_free: 1498err_free:
1499 dev_kfree_skb_any(skb); 1499 dev_kfree_skb_any(skb);
1500 return NETDEV_TX_OK;
1501} 1500}
1502 1501
1503static int ar9170_op_add_interface(struct ieee80211_hw *hw, 1502static int ar9170_op_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 70abb61e9eff..0ee54eb333de 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1164,8 +1164,8 @@ struct ath5k_txq;
1164 1164
1165void set_beacon_filter(struct ieee80211_hw *hw, bool enable); 1165void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
1166bool ath_any_vif_assoc(struct ath5k_softc *sc); 1166bool ath_any_vif_assoc(struct ath5k_softc *sc);
1167int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 1167void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1168 struct ath5k_txq *txq); 1168 struct ath5k_txq *txq);
1169int ath5k_init_hw(struct ath5k_softc *sc); 1169int ath5k_init_hw(struct ath5k_softc *sc);
1170int ath5k_stop_hw(struct ath5k_softc *sc); 1170int ath5k_stop_hw(struct ath5k_softc *sc);
1171void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif); 1171void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index dbc45e085434..91411e9b4b68 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1361,7 +1361,7 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1361 * right now, so it's not too bad... 1361 * right now, so it's not too bad...
1362 */ 1362 */
1363 rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp); 1363 rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
1364 rxs->flag |= RX_FLAG_TSFT; 1364 rxs->flag |= RX_FLAG_MACTIME_MPDU;
1365 1365
1366 rxs->freq = sc->curchan->center_freq; 1366 rxs->freq = sc->curchan->center_freq;
1367 rxs->band = sc->curchan->band; 1367 rxs->band = sc->curchan->band;
@@ -1518,7 +1518,7 @@ unlock:
1518* TX Handling * 1518* TX Handling *
1519\*************/ 1519\*************/
1520 1520
1521int 1521void
1522ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 1522ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1523 struct ath5k_txq *txq) 1523 struct ath5k_txq *txq)
1524{ 1524{
@@ -1567,11 +1567,10 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1567 spin_unlock_irqrestore(&sc->txbuflock, flags); 1567 spin_unlock_irqrestore(&sc->txbuflock, flags);
1568 goto drop_packet; 1568 goto drop_packet;
1569 } 1569 }
1570 return NETDEV_TX_OK; 1570 return;
1571 1571
1572drop_packet: 1572drop_packet:
1573 dev_kfree_skb_any(skb); 1573 dev_kfree_skb_any(skb);
1574 return NETDEV_TX_OK;
1575} 1574}
1576 1575
1577static void 1576static void
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index a60a726a140c..1fbe3c0b9f08 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -52,7 +52,7 @@ extern int ath5k_modparam_nohwcrypt;
52* Mac80211 functions * 52* Mac80211 functions *
53\********************/ 53\********************/
54 54
55static int 55static void
56ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 56ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
57{ 57{
58 struct ath5k_softc *sc = hw->priv; 58 struct ath5k_softc *sc = hw->priv;
@@ -60,10 +60,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
60 60
61 if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) { 61 if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
62 dev_kfree_skb_any(skb); 62 dev_kfree_skb_any(skb);
63 return 0; 63 return;
64 } 64 }
65 65
66 return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]); 66 ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
67} 67}
68 68
69 69
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 6fa3c24af2da..7f5de6e4448b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -78,15 +78,15 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
78 /* Awake Setting */ 78 /* Awake Setting */
79 79
80 INIT_INI_ARRAY(&ah->iniPcieSerdes, 80 INIT_INI_ARRAY(&ah->iniPcieSerdes,
81 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1, 81 ar9485_1_1_pcie_phy_clkreq_disable_L1,
82 ARRAY_SIZE(ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1), 82 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
83 2); 83 2);
84 84
85 /* Sleep Setting */ 85 /* Sleep Setting */
86 86
87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
88 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1, 88 ar9485_1_1_pcie_phy_clkreq_disable_L1,
89 ARRAY_SIZE(ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1), 89 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
90 2); 90 2);
91 } else if (AR_SREV_9485(ah)) { 91 } else if (AR_SREV_9485(ah)) {
92 /* mac */ 92 /* mac */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index f9f0389b92ab..c718ab512a97 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -449,26 +449,21 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
449 449
450#define ATH_LED_PIN_DEF 1 450#define ATH_LED_PIN_DEF 1
451#define ATH_LED_PIN_9287 8 451#define ATH_LED_PIN_9287 8
452#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */ 452#define ATH_LED_PIN_9485 6
453#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
454
455enum ath_led_type {
456 ATH_LED_RADIO,
457 ATH_LED_ASSOC,
458 ATH_LED_TX,
459 ATH_LED_RX
460};
461
462struct ath_led {
463 struct ath_softc *sc;
464 struct led_classdev led_cdev;
465 enum ath_led_type led_type;
466 char name[32];
467 bool registered;
468};
469 453
454#ifdef CONFIG_MAC80211_LEDS
470void ath_init_leds(struct ath_softc *sc); 455void ath_init_leds(struct ath_softc *sc);
471void ath_deinit_leds(struct ath_softc *sc); 456void ath_deinit_leds(struct ath_softc *sc);
457#else
458static inline void ath_init_leds(struct ath_softc *sc)
459{
460}
461
462static inline void ath_deinit_leds(struct ath_softc *sc)
463{
464}
465#endif
466
472 467
473/* Antenna diversity/combining */ 468/* Antenna diversity/combining */
474#define ATH_ANT_RX_CURRENT_SHIFT 4 469#define ATH_ANT_RX_CURRENT_SHIFT 4
@@ -620,15 +615,11 @@ struct ath_softc {
620 struct ath_beacon beacon; 615 struct ath_beacon beacon;
621 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 616 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
622 617
623 struct ath_led radio_led; 618#ifdef CONFIG_MAC80211_LEDS
624 struct ath_led assoc_led; 619 bool led_registered;
625 struct ath_led tx_led; 620 char led_name[32];
626 struct ath_led rx_led; 621 struct led_classdev led_cdev;
627 struct delayed_work ath_led_blink_work; 622#endif
628 int led_on_duration;
629 int led_off_duration;
630 int led_on_cnt;
631 int led_off_cnt;
632 623
633 struct ath9k_hw_cal_data caldata; 624 struct ath9k_hw_cal_data caldata;
634 int last_rssi; 625 int last_rssi;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index fb4f17a5183d..0fb8f8ac275a 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -20,121 +20,31 @@
20/* LED functions */ 20/* LED functions */
21/********************************/ 21/********************************/
22 22
23static void ath_led_blink_work(struct work_struct *work) 23#ifdef CONFIG_MAC80211_LEDS
24{
25 struct ath_softc *sc = container_of(work, struct ath_softc,
26 ath_led_blink_work.work);
27
28 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
29 return;
30
31 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
32 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
33 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
34 else
35 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
36 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
37
38 ieee80211_queue_delayed_work(sc->hw,
39 &sc->ath_led_blink_work,
40 (sc->sc_flags & SC_OP_LED_ON) ?
41 msecs_to_jiffies(sc->led_off_duration) :
42 msecs_to_jiffies(sc->led_on_duration));
43
44 sc->led_on_duration = sc->led_on_cnt ?
45 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
46 ATH_LED_ON_DURATION_IDLE;
47 sc->led_off_duration = sc->led_off_cnt ?
48 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
49 ATH_LED_OFF_DURATION_IDLE;
50 sc->led_on_cnt = sc->led_off_cnt = 0;
51 if (sc->sc_flags & SC_OP_LED_ON)
52 sc->sc_flags &= ~SC_OP_LED_ON;
53 else
54 sc->sc_flags |= SC_OP_LED_ON;
55}
56
57static void ath_led_brightness(struct led_classdev *led_cdev, 24static void ath_led_brightness(struct led_classdev *led_cdev,
58 enum led_brightness brightness) 25 enum led_brightness brightness)
59{ 26{
60 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev); 27 struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
61 struct ath_softc *sc = led->sc; 28 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
62
63 switch (brightness) {
64 case LED_OFF:
65 if (led->led_type == ATH_LED_ASSOC ||
66 led->led_type == ATH_LED_RADIO) {
67 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
68 (led->led_type == ATH_LED_RADIO));
69 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
70 if (led->led_type == ATH_LED_RADIO)
71 sc->sc_flags &= ~SC_OP_LED_ON;
72 } else {
73 sc->led_off_cnt++;
74 }
75 break;
76 case LED_FULL:
77 if (led->led_type == ATH_LED_ASSOC) {
78 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
79 if (led_blink)
80 ieee80211_queue_delayed_work(sc->hw,
81 &sc->ath_led_blink_work, 0);
82 } else if (led->led_type == ATH_LED_RADIO) {
83 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
84 sc->sc_flags |= SC_OP_LED_ON;
85 } else {
86 sc->led_on_cnt++;
87 }
88 break;
89 default:
90 break;
91 }
92}
93
94static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
95 char *trigger)
96{
97 int ret;
98
99 led->sc = sc;
100 led->led_cdev.name = led->name;
101 led->led_cdev.default_trigger = trigger;
102 led->led_cdev.brightness_set = ath_led_brightness;
103
104 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
105 if (ret)
106 ath_err(ath9k_hw_common(sc->sc_ah),
107 "Failed to register led:%s", led->name);
108 else
109 led->registered = 1;
110 return ret;
111}
112
113static void ath_unregister_led(struct ath_led *led)
114{
115 if (led->registered) {
116 led_classdev_unregister(&led->led_cdev);
117 led->registered = 0;
118 }
119} 29}
120 30
121void ath_deinit_leds(struct ath_softc *sc) 31void ath_deinit_leds(struct ath_softc *sc)
122{ 32{
123 ath_unregister_led(&sc->assoc_led); 33 if (!sc->led_registered)
124 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; 34 return;
125 ath_unregister_led(&sc->tx_led); 35
126 ath_unregister_led(&sc->rx_led); 36 ath_led_brightness(&sc->led_cdev, LED_OFF);
127 ath_unregister_led(&sc->radio_led); 37 led_classdev_unregister(&sc->led_cdev);
128 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
129} 38}
130 39
131void ath_init_leds(struct ath_softc *sc) 40void ath_init_leds(struct ath_softc *sc)
132{ 41{
133 char *trigger;
134 int ret; 42 int ret;
135 43
136 if (AR_SREV_9287(sc->sc_ah)) 44 if (AR_SREV_9287(sc->sc_ah))
137 sc->sc_ah->led_pin = ATH_LED_PIN_9287; 45 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
46 else if (AR_SREV_9485(sc->sc_ah))
47 sc->sc_ah->led_pin = ATH_LED_PIN_9485;
138 else 48 else
139 sc->sc_ah->led_pin = ATH_LED_PIN_DEF; 49 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
140 50
@@ -144,48 +54,22 @@ void ath_init_leds(struct ath_softc *sc)
144 /* LED off, active low */ 54 /* LED off, active low */
145 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 55 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
146 56
147 if (led_blink) 57 if (!led_blink)
148 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); 58 sc->led_cdev.default_trigger =
149 59 ieee80211_get_radio_led_name(sc->hw);
150 trigger = ieee80211_get_radio_led_name(sc->hw); 60
151 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), 61 snprintf(sc->led_name, sizeof(sc->led_name),
152 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy)); 62 "ath9k-%s", wiphy_name(sc->hw->wiphy));
153 ret = ath_register_led(sc, &sc->radio_led, trigger); 63 sc->led_cdev.name = sc->led_name;
154 sc->radio_led.led_type = ATH_LED_RADIO; 64 sc->led_cdev.brightness_set = ath_led_brightness;
155 if (ret) 65
156 goto fail; 66 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev);
157 67 if (ret < 0)
158 trigger = ieee80211_get_assoc_led_name(sc->hw); 68 return;
159 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name), 69
160 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy)); 70 sc->led_registered = true;
161 ret = ath_register_led(sc, &sc->assoc_led, trigger);
162 sc->assoc_led.led_type = ATH_LED_ASSOC;
163 if (ret)
164 goto fail;
165
166 trigger = ieee80211_get_tx_led_name(sc->hw);
167 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
168 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
169 ret = ath_register_led(sc, &sc->tx_led, trigger);
170 sc->tx_led.led_type = ATH_LED_TX;
171 if (ret)
172 goto fail;
173
174 trigger = ieee80211_get_rx_led_name(sc->hw);
175 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
176 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
177 ret = ath_register_led(sc, &sc->rx_led, trigger);
178 sc->rx_led.led_type = ATH_LED_RX;
179 if (ret)
180 goto fail;
181
182 return;
183
184fail:
185 if (led_blink)
186 cancel_delayed_work_sync(&sc->ath_led_blink_work);
187 ath_deinit_leds(sc);
188} 71}
72#endif
189 73
190/*******************/ 74/*******************/
191/* Rfkill */ 75/* Rfkill */
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084eb9cb..f1b8af64569c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -52,6 +52,9 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
52 { USB_DEVICE(0x083A, 0xA704), 52 { USB_DEVICE(0x083A, 0xA704),
53 .driver_info = AR9280_USB }, /* SMC Networks */ 53 .driver_info = AR9280_USB }, /* SMC Networks */
54 54
55 { USB_DEVICE(0x0cf3, 0x20ff),
56 .driver_info = STORAGE_DEVICE },
57
55 { }, 58 { },
56}; 59};
57 60
@@ -219,8 +222,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
219 struct tx_buf *tx_buf = NULL; 222 struct tx_buf *tx_buf = NULL;
220 struct sk_buff *nskb = NULL; 223 struct sk_buff *nskb = NULL;
221 int ret = 0, i; 224 int ret = 0, i;
222 u16 *hdr, tx_skb_cnt = 0; 225 u16 tx_skb_cnt = 0;
223 u8 *buf; 226 u8 *buf;
227 __le16 *hdr;
224 228
225 if (hif_dev->tx.tx_skb_cnt == 0) 229 if (hif_dev->tx.tx_skb_cnt == 0)
226 return 0; 230 return 0;
@@ -245,9 +249,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
245 249
246 buf = tx_buf->buf; 250 buf = tx_buf->buf;
247 buf += tx_buf->offset; 251 buf += tx_buf->offset;
248 hdr = (u16 *)buf; 252 hdr = (__le16 *)buf;
249 *hdr++ = nskb->len; 253 *hdr++ = cpu_to_le16(nskb->len);
250 *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; 254 *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
251 buf += 4; 255 buf += 4;
252 memcpy(buf, nskb->data, nskb->len); 256 memcpy(buf, nskb->data, nskb->len);
253 tx_buf->len = nskb->len + 4; 257 tx_buf->len = nskb->len + 4;
@@ -913,13 +917,11 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
913 if (ret) { 917 if (ret) {
914 dev_err(&hif_dev->udev->dev, 918 dev_err(&hif_dev->udev->dev,
915 "ath9k_htc: Unable to allocate URBs\n"); 919 "ath9k_htc: Unable to allocate URBs\n");
916 goto err_urb; 920 goto err_fw_download;
917 } 921 }
918 922
919 return 0; 923 return 0;
920 924
921err_urb:
922 ath9k_hif_usb_dealloc_urbs(hif_dev);
923err_fw_download: 925err_fw_download:
924 release_firmware(hif_dev->firmware); 926 release_firmware(hif_dev->firmware);
925err_fw_req: 927err_fw_req:
@@ -934,6 +936,61 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
934 release_firmware(hif_dev->firmware); 936 release_firmware(hif_dev->firmware);
935} 937}
936 938
939/*
940 * An exact copy of the function from zd1211rw.
941 */
942static int send_eject_command(struct usb_interface *interface)
943{
944 struct usb_device *udev = interface_to_usbdev(interface);
945 struct usb_host_interface *iface_desc = &interface->altsetting[0];
946 struct usb_endpoint_descriptor *endpoint;
947 unsigned char *cmd;
948 u8 bulk_out_ep;
949 int r;
950
951 /* Find bulk out endpoint */
952 for (r = 1; r >= 0; r--) {
953 endpoint = &iface_desc->endpoint[r].desc;
954 if (usb_endpoint_dir_out(endpoint) &&
955 usb_endpoint_xfer_bulk(endpoint)) {
956 bulk_out_ep = endpoint->bEndpointAddress;
957 break;
958 }
959 }
960 if (r == -1) {
961 dev_err(&udev->dev,
962 "ath9k_htc: Could not find bulk out endpoint\n");
963 return -ENODEV;
964 }
965
966 cmd = kzalloc(31, GFP_KERNEL);
967 if (cmd == NULL)
968 return -ENODEV;
969
970 /* USB bulk command block */
971 cmd[0] = 0x55; /* bulk command signature */
972 cmd[1] = 0x53; /* bulk command signature */
973 cmd[2] = 0x42; /* bulk command signature */
974 cmd[3] = 0x43; /* bulk command signature */
975 cmd[14] = 6; /* command length */
976
977 cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
978 cmd[19] = 0x2; /* eject disc */
979
980 dev_info(&udev->dev, "Ejecting storage device...\n");
981 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
982 cmd, 31, NULL, 2000);
983 kfree(cmd);
984 if (r)
985 return r;
986
987 /* At this point, the device disconnects and reconnects with the real
988 * ID numbers. */
989
990 usb_set_intfdata(interface, NULL);
991 return 0;
992}
993
937static int ath9k_hif_usb_probe(struct usb_interface *interface, 994static int ath9k_hif_usb_probe(struct usb_interface *interface,
938 const struct usb_device_id *id) 995 const struct usb_device_id *id)
939{ 996{
@@ -941,6 +998,9 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
941 struct hif_device_usb *hif_dev; 998 struct hif_device_usb *hif_dev;
942 int ret = 0; 999 int ret = 0;
943 1000
1001 if (id->driver_info == STORAGE_DEVICE)
1002 return send_eject_command(interface);
1003
944 hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL); 1004 hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
945 if (!hif_dev) { 1005 if (!hif_dev) {
946 ret = -ENOMEM; 1006 ret = -ENOMEM;
@@ -1027,12 +1087,13 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
1027 struct hif_device_usb *hif_dev = usb_get_intfdata(interface); 1087 struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
1028 bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false; 1088 bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
1029 1089
1030 if (hif_dev) { 1090 if (!hif_dev)
1031 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); 1091 return;
1032 ath9k_htc_hw_free(hif_dev->htc_handle); 1092
1033 ath9k_hif_usb_dev_deinit(hif_dev); 1093 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
1034 usb_set_intfdata(interface, NULL); 1094 ath9k_htc_hw_free(hif_dev->htc_handle);
1035 } 1095 ath9k_hif_usb_dev_deinit(hif_dev);
1096 usb_set_intfdata(interface, NULL);
1036 1097
1037 if (!unplugged && (hif_dev->flags & HIF_USB_START)) 1098 if (!unplugged && (hif_dev->flags & HIF_USB_START))
1038 ath9k_hif_usb_reboot(udev); 1099 ath9k_hif_usb_reboot(udev);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 0cb504d7b8c4..753a245c5ad1 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -32,6 +32,7 @@
32#include "wmi.h" 32#include "wmi.h"
33 33
34#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ 34#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
35#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
35#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */ 36#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
36#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 37#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
37#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 38#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
@@ -204,8 +205,50 @@ struct ath9k_htc_target_stats {
204 __be32 ht_tx_xretries; 205 __be32 ht_tx_xretries;
205} __packed; 206} __packed;
206 207
208#define ATH9K_HTC_MAX_VIF 2
209#define ATH9K_HTC_MAX_BCN_VIF 2
210
211#define INC_VIF(_priv, _type) do { \
212 switch (_type) { \
213 case NL80211_IFTYPE_STATION: \
214 _priv->num_sta_vif++; \
215 break; \
216 case NL80211_IFTYPE_ADHOC: \
217 _priv->num_ibss_vif++; \
218 break; \
219 case NL80211_IFTYPE_AP: \
220 _priv->num_ap_vif++; \
221 break; \
222 default: \
223 break; \
224 } \
225 } while (0)
226
227#define DEC_VIF(_priv, _type) do { \
228 switch (_type) { \
229 case NL80211_IFTYPE_STATION: \
230 _priv->num_sta_vif--; \
231 break; \
232 case NL80211_IFTYPE_ADHOC: \
233 _priv->num_ibss_vif--; \
234 break; \
235 case NL80211_IFTYPE_AP: \
236 _priv->num_ap_vif--; \
237 break; \
238 default: \
239 break; \
240 } \
241 } while (0)
242
207struct ath9k_htc_vif { 243struct ath9k_htc_vif {
208 u8 index; 244 u8 index;
245 u16 seq_no;
246 bool beacon_configured;
247};
248
249struct ath9k_vif_iter_data {
250 const u8 *hw_macaddr;
251 u8 mask[ETH_ALEN];
209}; 252};
210 253
211#define ATH9K_HTC_MAX_STA 8 254#define ATH9K_HTC_MAX_STA 8
@@ -310,10 +353,8 @@ struct ath_led {
310 353
311struct htc_beacon_config { 354struct htc_beacon_config {
312 u16 beacon_interval; 355 u16 beacon_interval;
313 u16 listen_interval;
314 u16 dtim_period; 356 u16 dtim_period;
315 u16 bmiss_timeout; 357 u16 bmiss_timeout;
316 u8 dtim_count;
317}; 358};
318 359
319struct ath_btcoex { 360struct ath_btcoex {
@@ -333,13 +374,12 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
333#define OP_SCANNING BIT(1) 374#define OP_SCANNING BIT(1)
334#define OP_LED_ASSOCIATED BIT(2) 375#define OP_LED_ASSOCIATED BIT(2)
335#define OP_LED_ON BIT(3) 376#define OP_LED_ON BIT(3)
336#define OP_PREAMBLE_SHORT BIT(4) 377#define OP_ENABLE_BEACON BIT(4)
337#define OP_PROTECT_ENABLE BIT(5) 378#define OP_LED_DEINIT BIT(5)
338#define OP_ASSOCIATED BIT(6) 379#define OP_BT_PRIORITY_DETECTED BIT(6)
339#define OP_ENABLE_BEACON BIT(7) 380#define OP_BT_SCAN BIT(7)
340#define OP_LED_DEINIT BIT(8) 381#define OP_ANI_RUNNING BIT(8)
341#define OP_BT_PRIORITY_DETECTED BIT(9) 382#define OP_TSF_RESET BIT(9)
342#define OP_BT_SCAN BIT(10)
343 383
344struct ath9k_htc_priv { 384struct ath9k_htc_priv {
345 struct device *dev; 385 struct device *dev;
@@ -358,13 +398,22 @@ struct ath9k_htc_priv {
358 enum htc_endpoint_id data_vi_ep; 398 enum htc_endpoint_id data_vi_ep;
359 enum htc_endpoint_id data_vo_ep; 399 enum htc_endpoint_id data_vo_ep;
360 400
401 u8 vif_slot;
402 u8 mon_vif_idx;
403 u8 sta_slot;
404 u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
405 u8 num_ibss_vif;
406 u8 num_sta_vif;
407 u8 num_ap_vif;
408
361 u16 op_flags; 409 u16 op_flags;
362 u16 curtxpow; 410 u16 curtxpow;
363 u16 txpowlimit; 411 u16 txpowlimit;
364 u16 nvifs; 412 u16 nvifs;
365 u16 nstations; 413 u16 nstations;
366 u16 seq_no;
367 u32 bmiss_cnt; 414 u32 bmiss_cnt;
415 bool rearm_ani;
416 bool reconfig_beacon;
368 417
369 struct ath9k_hw_cal_data caldata; 418 struct ath9k_hw_cal_data caldata;
370 419
@@ -382,7 +431,7 @@ struct ath9k_htc_priv {
382 struct ath9k_htc_rx rx; 431 struct ath9k_htc_rx rx;
383 struct tasklet_struct tx_tasklet; 432 struct tasklet_struct tx_tasklet;
384 struct sk_buff_head tx_queue; 433 struct sk_buff_head tx_queue;
385 struct delayed_work ath9k_ani_work; 434 struct delayed_work ani_work;
386 struct work_struct ps_work; 435 struct work_struct ps_work;
387 struct work_struct fatal_work; 436 struct work_struct fatal_work;
388 437
@@ -424,6 +473,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv);
424void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv); 473void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
425void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 474void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
426 struct ieee80211_vif *vif); 475 struct ieee80211_vif *vif);
476void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
427void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending); 477void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
428 478
429void ath9k_htc_rxep(void *priv, struct sk_buff *skb, 479void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
@@ -436,8 +486,9 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
436int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv); 486int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
437void ath9k_htc_station_work(struct work_struct *work); 487void ath9k_htc_station_work(struct work_struct *work);
438void ath9k_htc_aggr_work(struct work_struct *work); 488void ath9k_htc_aggr_work(struct work_struct *work);
439void ath9k_ani_work(struct work_struct *work);; 489void ath9k_htc_ani_work(struct work_struct *work);
440void ath_start_ani(struct ath9k_htc_priv *priv); 490void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
491void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
441 492
442int ath9k_tx_init(struct ath9k_htc_priv *priv); 493int ath9k_tx_init(struct ath9k_htc_priv *priv);
443void ath9k_tx_tasklet(unsigned long data); 494void ath9k_tx_tasklet(unsigned long data);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 87cc65a78a3f..8d1d8792436d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -123,8 +123,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
123 /* TSF out of range threshold fixed at 1 second */ 123 /* TSF out of range threshold fixed at 1 second */
124 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; 124 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
125 125
126 ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); 126 ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
127 ath_dbg(common, ATH_DBG_BEACON, 127 intval, tsf, tsftu);
128 ath_dbg(common, ATH_DBG_CONFIG,
128 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", 129 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
129 bs.bs_bmissthreshold, bs.bs_sleepduration, 130 bs.bs_bmissthreshold, bs.bs_sleepduration,
130 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); 131 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -138,25 +139,81 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
138 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); 139 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
139} 140}
140 141
142static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
143 struct htc_beacon_config *bss_conf)
144{
145 struct ath_common *common = ath9k_hw_common(priv->ah);
146 enum ath9k_int imask = 0;
147 u32 nexttbtt, intval, tsftu;
148 __be32 htc_imask = 0;
149 int ret;
150 u8 cmd_rsp;
151 u64 tsf;
152
153 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
154 intval /= ATH9K_HTC_MAX_BCN_VIF;
155 nexttbtt = intval;
156
157 if (priv->op_flags & OP_TSF_RESET) {
158 intval |= ATH9K_BEACON_RESET_TSF;
159 priv->op_flags &= ~OP_TSF_RESET;
160 } else {
161 /*
162 * Pull nexttbtt forward to reflect the current TSF.
163 */
164 tsf = ath9k_hw_gettsf64(priv->ah);
165 tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
166 do {
167 nexttbtt += intval;
168 } while (nexttbtt < tsftu);
169 }
170
171 intval |= ATH9K_BEACON_ENA;
172
173 if (priv->op_flags & OP_ENABLE_BEACON)
174 imask |= ATH9K_INT_SWBA;
175
176 ath_dbg(common, ATH_DBG_CONFIG,
177 "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n",
178 bss_conf->beacon_interval, nexttbtt, imask);
179
180 WMI_CMD(WMI_DISABLE_INTR_CMDID);
181 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
182 priv->bmiss_cnt = 0;
183 htc_imask = cpu_to_be32(imask);
184 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
185}
186
141static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv, 187static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
142 struct htc_beacon_config *bss_conf) 188 struct htc_beacon_config *bss_conf)
143{ 189{
144 struct ath_common *common = ath9k_hw_common(priv->ah); 190 struct ath_common *common = ath9k_hw_common(priv->ah);
145 enum ath9k_int imask = 0; 191 enum ath9k_int imask = 0;
146 u32 nexttbtt, intval; 192 u32 nexttbtt, intval, tsftu;
147 __be32 htc_imask = 0; 193 __be32 htc_imask = 0;
148 int ret; 194 int ret;
149 u8 cmd_rsp; 195 u8 cmd_rsp;
196 u64 tsf;
150 197
151 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD; 198 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
152 nexttbtt = intval; 199 nexttbtt = intval;
200
201 /*
202 * Pull nexttbtt forward to reflect the current TSF.
203 */
204 tsf = ath9k_hw_gettsf64(priv->ah);
205 tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
206 do {
207 nexttbtt += intval;
208 } while (nexttbtt < tsftu);
209
153 intval |= ATH9K_BEACON_ENA; 210 intval |= ATH9K_BEACON_ENA;
154 if (priv->op_flags & OP_ENABLE_BEACON) 211 if (priv->op_flags & OP_ENABLE_BEACON)
155 imask |= ATH9K_INT_SWBA; 212 imask |= ATH9K_INT_SWBA;
156 213
157 ath_dbg(common, ATH_DBG_BEACON, 214 ath_dbg(common, ATH_DBG_CONFIG,
158 "IBSS Beacon config, intval: %d, imask: 0x%x\n", 215 "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n",
159 bss_conf->beacon_interval, imask); 216 bss_conf->beacon_interval, nexttbtt, imask);
160 217
161 WMI_CMD(WMI_DISABLE_INTR_CMDID); 218 WMI_CMD(WMI_DISABLE_INTR_CMDID);
162 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval); 219 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
@@ -207,9 +264,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
207 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 264 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
208 struct ieee80211_hdr *hdr = 265 struct ieee80211_hdr *hdr =
209 (struct ieee80211_hdr *) beacon->data; 266 (struct ieee80211_hdr *) beacon->data;
210 priv->seq_no += 0x10; 267 avp->seq_no += 0x10;
211 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 268 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
212 hdr->seq_ctrl |= cpu_to_le16(priv->seq_no); 269 hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
213 } 270 }
214 271
215 tx_ctl.type = ATH9K_HTC_NORMAL; 272 tx_ctl.type = ATH9K_HTC_NORMAL;
@@ -253,30 +310,123 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
253 } 310 }
254} 311}
255 312
313static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
314{
315 bool *beacon_configured = (bool *)data;
316 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
317
318 if (vif->type == NL80211_IFTYPE_STATION &&
319 avp->beacon_configured)
320 *beacon_configured = true;
321}
322
323static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
324 struct ieee80211_vif *vif)
325{
326 struct ath_common *common = ath9k_hw_common(priv->ah);
327 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
328 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
329 bool beacon_configured;
330
331 /*
332 * Changing the beacon interval when multiple AP interfaces
333 * are configured will affect beacon transmission of all
334 * of them.
335 */
336 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
337 (priv->num_ap_vif > 1) &&
338 (vif->type == NL80211_IFTYPE_AP) &&
339 (cur_conf->beacon_interval != bss_conf->beacon_int)) {
340 ath_dbg(common, ATH_DBG_CONFIG,
341 "Changing beacon interval of multiple AP interfaces !\n");
342 return false;
343 }
344
345 /*
346 * If the HW is operating in AP mode, any new station interfaces that
347 * are added cannot change the beacon parameters.
348 */
349 if (priv->num_ap_vif &&
350 (vif->type != NL80211_IFTYPE_AP)) {
351 ath_dbg(common, ATH_DBG_CONFIG,
352 "HW in AP mode, cannot set STA beacon parameters\n");
353 return false;
354 }
355
356 /*
357 * The beacon parameters are configured only for the first
358 * station interface.
359 */
360 if ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
361 (priv->num_sta_vif > 1) &&
362 (vif->type == NL80211_IFTYPE_STATION)) {
363 beacon_configured = false;
364 ieee80211_iterate_active_interfaces_atomic(priv->hw,
365 ath9k_htc_beacon_iter,
366 &beacon_configured);
367
368 if (beacon_configured) {
369 ath_dbg(common, ATH_DBG_CONFIG,
370 "Beacon already configured for a station interface\n");
371 return false;
372 }
373 }
374
375 return true;
376}
377
256void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 378void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
257 struct ieee80211_vif *vif) 379 struct ieee80211_vif *vif)
258{ 380{
259 struct ath_common *common = ath9k_hw_common(priv->ah); 381 struct ath_common *common = ath9k_hw_common(priv->ah);
260 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; 382 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
261 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 383 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
384 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
385
386 if (!ath9k_htc_check_beacon_config(priv, vif))
387 return;
262 388
263 cur_conf->beacon_interval = bss_conf->beacon_int; 389 cur_conf->beacon_interval = bss_conf->beacon_int;
264 if (cur_conf->beacon_interval == 0) 390 if (cur_conf->beacon_interval == 0)
265 cur_conf->beacon_interval = 100; 391 cur_conf->beacon_interval = 100;
266 392
267 cur_conf->dtim_period = bss_conf->dtim_period; 393 cur_conf->dtim_period = bss_conf->dtim_period;
268 cur_conf->listen_interval = 1;
269 cur_conf->dtim_count = 1;
270 cur_conf->bmiss_timeout = 394 cur_conf->bmiss_timeout =
271 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; 395 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
272 396
273 switch (vif->type) { 397 switch (vif->type) {
274 case NL80211_IFTYPE_STATION: 398 case NL80211_IFTYPE_STATION:
275 ath9k_htc_beacon_config_sta(priv, cur_conf); 399 ath9k_htc_beacon_config_sta(priv, cur_conf);
400 avp->beacon_configured = true;
401 break;
402 case NL80211_IFTYPE_ADHOC:
403 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
404 break;
405 case NL80211_IFTYPE_AP:
406 ath9k_htc_beacon_config_ap(priv, cur_conf);
407 break;
408 default:
409 ath_dbg(common, ATH_DBG_CONFIG,
410 "Unsupported beaconing mode\n");
411 return;
412 }
413}
414
415void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
416{
417 struct ath_common *common = ath9k_hw_common(priv->ah);
418 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
419
420 switch (priv->ah->opmode) {
421 case NL80211_IFTYPE_STATION:
422 ath9k_htc_beacon_config_sta(priv, cur_conf);
276 break; 423 break;
277 case NL80211_IFTYPE_ADHOC: 424 case NL80211_IFTYPE_ADHOC:
278 ath9k_htc_beacon_config_adhoc(priv, cur_conf); 425 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
279 break; 426 break;
427 case NL80211_IFTYPE_AP:
428 ath9k_htc_beacon_config_ap(priv, cur_conf);
429 break;
280 default: 430 default:
281 ath_dbg(common, ATH_DBG_CONFIG, 431 ath_dbg(common, ATH_DBG_CONFIG,
282 "Unsupported beaconing mode\n"); 432 "Unsupported beaconing mode\n");
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a7bc26d1bd66..fc67c937e172 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -679,7 +679,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
679 (unsigned long)priv); 679 (unsigned long)priv);
680 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, 680 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet,
681 (unsigned long)priv); 681 (unsigned long)priv);
682 INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work); 682 INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
683 INIT_WORK(&priv->ps_work, ath9k_ps_work); 683 INIT_WORK(&priv->ps_work, ath9k_ps_work);
684 INIT_WORK(&priv->fatal_work, ath9k_fatal_work); 684 INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
685 685
@@ -787,6 +787,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
787 struct ath_hw *ah; 787 struct ath_hw *ah;
788 int error = 0; 788 int error = 0;
789 struct ath_regulatory *reg; 789 struct ath_regulatory *reg;
790 char hw_name[64];
790 791
791 /* Bring up device */ 792 /* Bring up device */
792 error = ath9k_init_priv(priv, devid, product, drv_info); 793 error = ath9k_init_priv(priv, devid, product, drv_info);
@@ -827,6 +828,22 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
827 goto err_world; 828 goto err_world;
828 } 829 }
829 830
831 ath_dbg(common, ATH_DBG_CONFIG,
832 "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
833 "BE:%d, BK:%d, VI:%d, VO:%d\n",
834 priv->wmi_cmd_ep,
835 priv->beacon_ep,
836 priv->cab_ep,
837 priv->uapsd_ep,
838 priv->mgmt_ep,
839 priv->data_be_ep,
840 priv->data_bk_ep,
841 priv->data_vi_ep,
842 priv->data_vo_ep);
843
844 ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name));
845 wiphy_info(hw->wiphy, "%s\n", hw_name);
846
830 ath9k_init_leds(priv); 847 ath9k_init_leds(priv);
831 ath9k_start_rfkill_poll(priv); 848 ath9k_start_rfkill_poll(priv);
832 849
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 50fde0e10595..db8c0c044e9e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -105,6 +105,82 @@ void ath9k_ps_work(struct work_struct *work)
105 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP); 105 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
106} 106}
107 107
108static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
109{
110 struct ath9k_htc_priv *priv = data;
111 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
112
113 if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
114 priv->reconfig_beacon = true;
115
116 if (bss_conf->assoc) {
117 priv->rearm_ani = true;
118 priv->reconfig_beacon = true;
119 }
120}
121
122static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
123{
124 priv->rearm_ani = false;
125 priv->reconfig_beacon = false;
126
127 ieee80211_iterate_active_interfaces_atomic(priv->hw,
128 ath9k_htc_vif_iter, priv);
129 if (priv->rearm_ani)
130 ath9k_htc_start_ani(priv);
131
132 if (priv->reconfig_beacon) {
133 ath9k_htc_ps_wakeup(priv);
134 ath9k_htc_beacon_reconfig(priv);
135 ath9k_htc_ps_restore(priv);
136 }
137}
138
139static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
140{
141 struct ath9k_vif_iter_data *iter_data = data;
142 int i;
143
144 for (i = 0; i < ETH_ALEN; i++)
145 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
146}
147
148static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
149 struct ieee80211_vif *vif)
150{
151 struct ath_common *common = ath9k_hw_common(priv->ah);
152 struct ath9k_vif_iter_data iter_data;
153
154 /*
155 * Use the hardware MAC address as reference, the hardware uses it
156 * together with the BSSID mask when matching addresses.
157 */
158 iter_data.hw_macaddr = common->macaddr;
159 memset(&iter_data.mask, 0xff, ETH_ALEN);
160
161 if (vif)
162 ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
163
164 /* Get list of all active MAC addresses */
165 ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
166 &iter_data);
167
168 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
169 ath_hw_setbssidmask(common);
170}
171
172static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
173{
174 if (priv->num_ibss_vif)
175 priv->ah->opmode = NL80211_IFTYPE_ADHOC;
176 else if (priv->num_ap_vif)
177 priv->ah->opmode = NL80211_IFTYPE_AP;
178 else
179 priv->ah->opmode = NL80211_IFTYPE_STATION;
180
181 ath9k_hw_setopmode(priv->ah);
182}
183
108void ath9k_htc_reset(struct ath9k_htc_priv *priv) 184void ath9k_htc_reset(struct ath9k_htc_priv *priv)
109{ 185{
110 struct ath_hw *ah = priv->ah; 186 struct ath_hw *ah = priv->ah;
@@ -119,9 +195,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
119 mutex_lock(&priv->mutex); 195 mutex_lock(&priv->mutex);
120 ath9k_htc_ps_wakeup(priv); 196 ath9k_htc_ps_wakeup(priv);
121 197
122 if (priv->op_flags & OP_ASSOCIATED) 198 ath9k_htc_stop_ani(priv);
123 cancel_delayed_work_sync(&priv->ath9k_ani_work);
124
125 ieee80211_stop_queues(priv->hw); 199 ieee80211_stop_queues(priv->hw);
126 htc_stop(priv->htc); 200 htc_stop(priv->htc);
127 WMI_CMD(WMI_DISABLE_INTR_CMDID); 201 WMI_CMD(WMI_DISABLE_INTR_CMDID);
@@ -148,12 +222,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
148 222
149 WMI_CMD(WMI_ENABLE_INTR_CMDID); 223 WMI_CMD(WMI_ENABLE_INTR_CMDID);
150 htc_start(priv->htc); 224 htc_start(priv->htc);
151 225 ath9k_htc_vif_reconfig(priv);
152 if (priv->op_flags & OP_ASSOCIATED) {
153 ath9k_htc_beacon_config(priv, priv->vif);
154 ath_start_ani(priv);
155 }
156
157 ieee80211_wake_queues(priv->hw); 226 ieee80211_wake_queues(priv->hw);
158 227
159 ath9k_htc_ps_restore(priv); 228 ath9k_htc_ps_restore(priv);
@@ -222,11 +291,23 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
222 goto err; 291 goto err;
223 292
224 htc_start(priv->htc); 293 htc_start(priv->htc);
294
295 if (!(priv->op_flags & OP_SCANNING) &&
296 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
297 ath9k_htc_vif_reconfig(priv);
298
225err: 299err:
226 ath9k_htc_ps_restore(priv); 300 ath9k_htc_ps_restore(priv);
227 return ret; 301 return ret;
228} 302}
229 303
304/*
305 * Monitor mode handling is a tad complicated because the firmware requires
306 * an interface to be created exclusively, while mac80211 doesn't associate
307 * an interface with the mode.
308 *
309 * So, for now, only one monitor interface can be configured.
310 */
230static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) 311static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
231{ 312{
232 struct ath_common *common = ath9k_hw_common(priv->ah); 313 struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -236,9 +317,10 @@ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
236 317
237 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 318 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
238 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); 319 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
239 hvif.index = 0; /* Should do for now */ 320 hvif.index = priv->mon_vif_idx;
240 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); 321 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
241 priv->nvifs--; 322 priv->nvifs--;
323 priv->vif_slot &= ~(1 << priv->mon_vif_idx);
242} 324}
243 325
244static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) 326static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
@@ -246,70 +328,87 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
246 struct ath_common *common = ath9k_hw_common(priv->ah); 328 struct ath_common *common = ath9k_hw_common(priv->ah);
247 struct ath9k_htc_target_vif hvif; 329 struct ath9k_htc_target_vif hvif;
248 struct ath9k_htc_target_sta tsta; 330 struct ath9k_htc_target_sta tsta;
249 int ret = 0; 331 int ret = 0, sta_idx;
250 u8 cmd_rsp; 332 u8 cmd_rsp;
251 333
252 if (priv->nvifs > 0) 334 if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) ||
253 return -ENOBUFS; 335 (priv->nstations >= ATH9K_HTC_MAX_STA)) {
336 ret = -ENOBUFS;
337 goto err_vif;
338 }
254 339
255 if (priv->nstations >= ATH9K_HTC_MAX_STA) 340 sta_idx = ffz(priv->sta_slot);
256 return -ENOBUFS; 341 if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) {
342 ret = -ENOBUFS;
343 goto err_vif;
344 }
257 345
258 /* 346 /*
259 * Add an interface. 347 * Add an interface.
260 */ 348 */
261
262 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 349 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
263 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); 350 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
264 351
265 hvif.opmode = cpu_to_be32(HTC_M_MONITOR); 352 hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
266 priv->ah->opmode = NL80211_IFTYPE_MONITOR; 353 hvif.index = ffz(priv->vif_slot);
267 hvif.index = priv->nvifs;
268 354
269 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); 355 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
270 if (ret) 356 if (ret)
271 return ret; 357 goto err_vif;
358
359 /*
360 * Assign the monitor interface index as a special case here.
361 * This is needed when the interface is brought down.
362 */
363 priv->mon_vif_idx = hvif.index;
364 priv->vif_slot |= (1 << hvif.index);
365
366 /*
367 * Set the hardware mode to monitor only if there are no
368 * other interfaces.
369 */
370 if (!priv->nvifs)
371 priv->ah->opmode = NL80211_IFTYPE_MONITOR;
272 372
273 priv->nvifs++; 373 priv->nvifs++;
274 374
275 /* 375 /*
276 * Associate a station with the interface for packet injection. 376 * Associate a station with the interface for packet injection.
277 */ 377 */
278
279 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); 378 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
280 379
281 memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN); 380 memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
282 381
283 tsta.is_vif_sta = 1; 382 tsta.is_vif_sta = 1;
284 tsta.sta_index = priv->nstations; 383 tsta.sta_index = sta_idx;
285 tsta.vif_index = hvif.index; 384 tsta.vif_index = hvif.index;
286 tsta.maxampdu = 0xffff; 385 tsta.maxampdu = 0xffff;
287 386
288 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); 387 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
289 if (ret) { 388 if (ret) {
290 ath_err(common, "Unable to add station entry for monitor mode\n"); 389 ath_err(common, "Unable to add station entry for monitor mode\n");
291 goto err_vif; 390 goto err_sta;
292 } 391 }
293 392
393 priv->sta_slot |= (1 << sta_idx);
294 priv->nstations++; 394 priv->nstations++;
295 395 priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
296 /*
297 * Set chainmask etc. on the target.
298 */
299 ret = ath9k_htc_update_cap_target(priv);
300 if (ret)
301 ath_dbg(common, ATH_DBG_CONFIG,
302 "Failed to update capability in target\n");
303
304 priv->ah->is_monitoring = true; 396 priv->ah->is_monitoring = true;
305 397
398 ath_dbg(common, ATH_DBG_CONFIG,
399 "Attached a monitor interface at idx: %d, sta idx: %d\n",
400 priv->mon_vif_idx, sta_idx);
401
306 return 0; 402 return 0;
307 403
308err_vif: 404err_sta:
309 /* 405 /*
310 * Remove the interface from the target. 406 * Remove the interface from the target.
311 */ 407 */
312 __ath9k_htc_remove_monitor_interface(priv); 408 __ath9k_htc_remove_monitor_interface(priv);
409err_vif:
410 ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
411
313 return ret; 412 return ret;
314} 413}
315 414
@@ -321,7 +420,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
321 420
322 __ath9k_htc_remove_monitor_interface(priv); 421 __ath9k_htc_remove_monitor_interface(priv);
323 422
324 sta_idx = 0; /* Only single interface, for now */ 423 sta_idx = priv->vif_sta_pos[priv->mon_vif_idx];
325 424
326 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); 425 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
327 if (ret) { 426 if (ret) {
@@ -329,9 +428,14 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
329 return ret; 428 return ret;
330 } 429 }
331 430
431 priv->sta_slot &= ~(1 << sta_idx);
332 priv->nstations--; 432 priv->nstations--;
333 priv->ah->is_monitoring = false; 433 priv->ah->is_monitoring = false;
334 434
435 ath_dbg(common, ATH_DBG_CONFIG,
436 "Removed a monitor interface at idx: %d, sta idx: %d\n",
437 priv->mon_vif_idx, sta_idx);
438
335 return 0; 439 return 0;
336} 440}
337 441
@@ -343,12 +447,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
343 struct ath9k_htc_target_sta tsta; 447 struct ath9k_htc_target_sta tsta;
344 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; 448 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
345 struct ath9k_htc_sta *ista; 449 struct ath9k_htc_sta *ista;
346 int ret; 450 int ret, sta_idx;
347 u8 cmd_rsp; 451 u8 cmd_rsp;
348 452
349 if (priv->nstations >= ATH9K_HTC_MAX_STA) 453 if (priv->nstations >= ATH9K_HTC_MAX_STA)
350 return -ENOBUFS; 454 return -ENOBUFS;
351 455
456 sta_idx = ffz(priv->sta_slot);
457 if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA))
458 return -ENOBUFS;
459
352 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); 460 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
353 461
354 if (sta) { 462 if (sta) {
@@ -358,13 +466,13 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
358 tsta.associd = common->curaid; 466 tsta.associd = common->curaid;
359 tsta.is_vif_sta = 0; 467 tsta.is_vif_sta = 0;
360 tsta.valid = true; 468 tsta.valid = true;
361 ista->index = priv->nstations; 469 ista->index = sta_idx;
362 } else { 470 } else {
363 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); 471 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
364 tsta.is_vif_sta = 1; 472 tsta.is_vif_sta = 1;
365 } 473 }
366 474
367 tsta.sta_index = priv->nstations; 475 tsta.sta_index = sta_idx;
368 tsta.vif_index = avp->index; 476 tsta.vif_index = avp->index;
369 tsta.maxampdu = 0xffff; 477 tsta.maxampdu = 0xffff;
370 if (sta && sta->ht_cap.ht_supported) 478 if (sta && sta->ht_cap.ht_supported)
@@ -379,12 +487,21 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
379 return ret; 487 return ret;
380 } 488 }
381 489
382 if (sta) 490 if (sta) {
383 ath_dbg(common, ATH_DBG_CONFIG, 491 ath_dbg(common, ATH_DBG_CONFIG,
384 "Added a station entry for: %pM (idx: %d)\n", 492 "Added a station entry for: %pM (idx: %d)\n",
385 sta->addr, tsta.sta_index); 493 sta->addr, tsta.sta_index);
494 } else {
495 ath_dbg(common, ATH_DBG_CONFIG,
496 "Added a station entry for VIF %d (idx: %d)\n",
497 avp->index, tsta.sta_index);
498 }
386 499
500 priv->sta_slot |= (1 << sta_idx);
387 priv->nstations++; 501 priv->nstations++;
502 if (!sta)
503 priv->vif_sta_pos[avp->index] = sta_idx;
504
388 return 0; 505 return 0;
389} 506}
390 507
@@ -393,6 +510,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
393 struct ieee80211_sta *sta) 510 struct ieee80211_sta *sta)
394{ 511{
395 struct ath_common *common = ath9k_hw_common(priv->ah); 512 struct ath_common *common = ath9k_hw_common(priv->ah);
513 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
396 struct ath9k_htc_sta *ista; 514 struct ath9k_htc_sta *ista;
397 int ret; 515 int ret;
398 u8 cmd_rsp, sta_idx; 516 u8 cmd_rsp, sta_idx;
@@ -401,7 +519,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
401 ista = (struct ath9k_htc_sta *) sta->drv_priv; 519 ista = (struct ath9k_htc_sta *) sta->drv_priv;
402 sta_idx = ista->index; 520 sta_idx = ista->index;
403 } else { 521 } else {
404 sta_idx = 0; 522 sta_idx = priv->vif_sta_pos[avp->index];
405 } 523 }
406 524
407 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); 525 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
@@ -413,12 +531,19 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
413 return ret; 531 return ret;
414 } 532 }
415 533
416 if (sta) 534 if (sta) {
417 ath_dbg(common, ATH_DBG_CONFIG, 535 ath_dbg(common, ATH_DBG_CONFIG,
418 "Removed a station entry for: %pM (idx: %d)\n", 536 "Removed a station entry for: %pM (idx: %d)\n",
419 sta->addr, sta_idx); 537 sta->addr, sta_idx);
538 } else {
539 ath_dbg(common, ATH_DBG_CONFIG,
540 "Removed a station entry for VIF %d (idx: %d)\n",
541 avp->index, sta_idx);
542 }
420 543
544 priv->sta_slot &= ~(1 << sta_idx);
421 priv->nstations--; 545 priv->nstations--;
546
422 return 0; 547 return 0;
423} 548}
424 549
@@ -800,7 +925,7 @@ void ath9k_htc_debug_remove_root(void)
800/* ANI */ 925/* ANI */
801/*******/ 926/*******/
802 927
803void ath_start_ani(struct ath9k_htc_priv *priv) 928void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
804{ 929{
805 struct ath_common *common = ath9k_hw_common(priv->ah); 930 struct ath_common *common = ath9k_hw_common(priv->ah);
806 unsigned long timestamp = jiffies_to_msecs(jiffies); 931 unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -809,15 +934,22 @@ void ath_start_ani(struct ath9k_htc_priv *priv)
809 common->ani.shortcal_timer = timestamp; 934 common->ani.shortcal_timer = timestamp;
810 common->ani.checkani_timer = timestamp; 935 common->ani.checkani_timer = timestamp;
811 936
812 ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work, 937 priv->op_flags |= OP_ANI_RUNNING;
938
939 ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
813 msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 940 msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
814} 941}
815 942
816void ath9k_ani_work(struct work_struct *work) 943void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
944{
945 cancel_delayed_work_sync(&priv->ani_work);
946 priv->op_flags &= ~OP_ANI_RUNNING;
947}
948
949void ath9k_htc_ani_work(struct work_struct *work)
817{ 950{
818 struct ath9k_htc_priv *priv = 951 struct ath9k_htc_priv *priv =
819 container_of(work, struct ath9k_htc_priv, 952 container_of(work, struct ath9k_htc_priv, ani_work.work);
820 ath9k_ani_work.work);
821 struct ath_hw *ah = priv->ah; 953 struct ath_hw *ah = priv->ah;
822 struct ath_common *common = ath9k_hw_common(ah); 954 struct ath_common *common = ath9k_hw_common(ah);
823 bool longcal = false; 955 bool longcal = false;
@@ -826,7 +958,8 @@ void ath9k_ani_work(struct work_struct *work)
826 unsigned int timestamp = jiffies_to_msecs(jiffies); 958 unsigned int timestamp = jiffies_to_msecs(jiffies);
827 u32 cal_interval, short_cal_interval; 959 u32 cal_interval, short_cal_interval;
828 960
829 short_cal_interval = ATH_STA_SHORT_CALINTERVAL; 961 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
962 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
830 963
831 /* Only calibrate if awake */ 964 /* Only calibrate if awake */
832 if (ah->power_mode != ATH9K_PM_AWAKE) 965 if (ah->power_mode != ATH9K_PM_AWAKE)
@@ -895,7 +1028,7 @@ set_timer:
895 if (!common->ani.caldone) 1028 if (!common->ani.caldone)
896 cal_interval = min(cal_interval, (u32)short_cal_interval); 1029 cal_interval = min(cal_interval, (u32)short_cal_interval);
897 1030
898 ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work, 1031 ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
899 msecs_to_jiffies(cal_interval)); 1032 msecs_to_jiffies(cal_interval));
900} 1033}
901 1034
@@ -903,7 +1036,7 @@ set_timer:
903/* mac80211 Callbacks */ 1036/* mac80211 Callbacks */
904/**********************/ 1037/**********************/
905 1038
906static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1039static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
907{ 1040{
908 struct ieee80211_hdr *hdr; 1041 struct ieee80211_hdr *hdr;
909 struct ath9k_htc_priv *priv = hw->priv; 1042 struct ath9k_htc_priv *priv = hw->priv;
@@ -916,7 +1049,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
916 padsize = padpos & 3; 1049 padsize = padpos & 3;
917 if (padsize && skb->len > padpos) { 1050 if (padsize && skb->len > padpos) {
918 if (skb_headroom(skb) < padsize) 1051 if (skb_headroom(skb) < padsize)
919 return -1; 1052 goto fail_tx;
920 skb_push(skb, padsize); 1053 skb_push(skb, padsize);
921 memmove(skb->data, skb->data + padsize, padpos); 1054 memmove(skb->data, skb->data + padsize, padpos);
922 } 1055 }
@@ -937,11 +1070,10 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
937 goto fail_tx; 1070 goto fail_tx;
938 } 1071 }
939 1072
940 return 0; 1073 return;
941 1074
942fail_tx: 1075fail_tx:
943 dev_kfree_skb_any(skb); 1076 dev_kfree_skb_any(skb);
944 return 0;
945} 1077}
946 1078
947static int ath9k_htc_start(struct ieee80211_hw *hw) 1079static int ath9k_htc_start(struct ieee80211_hw *hw)
@@ -990,6 +1122,11 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
990 1122
991 ath9k_host_rx_init(priv); 1123 ath9k_host_rx_init(priv);
992 1124
1125 ret = ath9k_htc_update_cap_target(priv);
1126 if (ret)
1127 ath_dbg(common, ATH_DBG_CONFIG,
1128 "Failed to update capability in target\n");
1129
993 priv->op_flags &= ~OP_INVALID; 1130 priv->op_flags &= ~OP_INVALID;
994 htc_start(priv->htc); 1131 htc_start(priv->htc);
995 1132
@@ -1044,26 +1181,21 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1044 cancel_work_sync(&priv->fatal_work); 1181 cancel_work_sync(&priv->fatal_work);
1045 cancel_work_sync(&priv->ps_work); 1182 cancel_work_sync(&priv->ps_work);
1046 cancel_delayed_work_sync(&priv->ath9k_led_blink_work); 1183 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1047 cancel_delayed_work_sync(&priv->ath9k_ani_work); 1184 ath9k_htc_stop_ani(priv);
1048 ath9k_led_stop_brightness(priv); 1185 ath9k_led_stop_brightness(priv);
1049 1186
1050 mutex_lock(&priv->mutex); 1187 mutex_lock(&priv->mutex);
1051 1188
1052 /* Remove monitor interface here */
1053 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
1054 if (ath9k_htc_remove_monitor_interface(priv))
1055 ath_err(common, "Unable to remove monitor interface\n");
1056 else
1057 ath_dbg(common, ATH_DBG_CONFIG,
1058 "Monitor interface removed\n");
1059 }
1060
1061 if (ah->btcoex_hw.enabled) { 1189 if (ah->btcoex_hw.enabled) {
1062 ath9k_hw_btcoex_disable(ah); 1190 ath9k_hw_btcoex_disable(ah);
1063 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 1191 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1064 ath_htc_cancel_btcoex_work(priv); 1192 ath_htc_cancel_btcoex_work(priv);
1065 } 1193 }
1066 1194
1195 /* Remove a monitor interface if it's present. */
1196 if (priv->ah->is_monitoring)
1197 ath9k_htc_remove_monitor_interface(priv);
1198
1067 ath9k_hw_phy_disable(ah); 1199 ath9k_hw_phy_disable(ah);
1068 ath9k_hw_disable(ah); 1200 ath9k_hw_disable(ah);
1069 ath9k_htc_ps_restore(priv); 1201 ath9k_htc_ps_restore(priv);
@@ -1087,10 +1219,24 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1087 1219
1088 mutex_lock(&priv->mutex); 1220 mutex_lock(&priv->mutex);
1089 1221
1090 /* Only one interface for now */ 1222 if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
1091 if (priv->nvifs > 0) { 1223 mutex_unlock(&priv->mutex);
1092 ret = -ENOBUFS; 1224 return -ENOBUFS;
1093 goto out; 1225 }
1226
1227 if (priv->num_ibss_vif ||
1228 (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
1229 ath_err(common, "IBSS coexistence with other modes is not allowed\n");
1230 mutex_unlock(&priv->mutex);
1231 return -ENOBUFS;
1232 }
1233
1234 if (((vif->type == NL80211_IFTYPE_AP) ||
1235 (vif->type == NL80211_IFTYPE_ADHOC)) &&
1236 ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
1237 ath_err(common, "Max. number of beaconing interfaces reached\n");
1238 mutex_unlock(&priv->mutex);
1239 return -ENOBUFS;
1094 } 1240 }
1095 1241
1096 ath9k_htc_ps_wakeup(priv); 1242 ath9k_htc_ps_wakeup(priv);
@@ -1104,6 +1250,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1104 case NL80211_IFTYPE_ADHOC: 1250 case NL80211_IFTYPE_ADHOC:
1105 hvif.opmode = cpu_to_be32(HTC_M_IBSS); 1251 hvif.opmode = cpu_to_be32(HTC_M_IBSS);
1106 break; 1252 break;
1253 case NL80211_IFTYPE_AP:
1254 hvif.opmode = cpu_to_be32(HTC_M_HOSTAP);
1255 break;
1107 default: 1256 default:
1108 ath_err(common, 1257 ath_err(common,
1109 "Interface type %d not yet supported\n", vif->type); 1258 "Interface type %d not yet supported\n", vif->type);
@@ -1111,34 +1260,39 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1111 goto out; 1260 goto out;
1112 } 1261 }
1113 1262
1114 ath_dbg(common, ATH_DBG_CONFIG,
1115 "Attach a VIF of type: %d\n", vif->type);
1116
1117 priv->ah->opmode = vif->type;
1118
1119 /* Index starts from zero on the target */ 1263 /* Index starts from zero on the target */
1120 avp->index = hvif.index = priv->nvifs; 1264 avp->index = hvif.index = ffz(priv->vif_slot);
1121 hvif.rtsthreshold = cpu_to_be16(2304); 1265 hvif.rtsthreshold = cpu_to_be16(2304);
1122 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); 1266 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
1123 if (ret) 1267 if (ret)
1124 goto out; 1268 goto out;
1125 1269
1126 priv->nvifs++;
1127
1128 /* 1270 /*
1129 * We need a node in target to tx mgmt frames 1271 * We need a node in target to tx mgmt frames
1130 * before association. 1272 * before association.
1131 */ 1273 */
1132 ret = ath9k_htc_add_station(priv, vif, NULL); 1274 ret = ath9k_htc_add_station(priv, vif, NULL);
1133 if (ret) 1275 if (ret) {
1276 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
1134 goto out; 1277 goto out;
1278 }
1135 1279
1136 ret = ath9k_htc_update_cap_target(priv); 1280 ath9k_htc_set_bssid_mask(priv, vif);
1137 if (ret)
1138 ath_dbg(common, ATH_DBG_CONFIG,
1139 "Failed to update capability in target\n");
1140 1281
1282 priv->vif_slot |= (1 << avp->index);
1283 priv->nvifs++;
1141 priv->vif = vif; 1284 priv->vif = vif;
1285
1286 INC_VIF(priv, vif->type);
1287 ath9k_htc_set_opmode(priv);
1288
1289 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1290 !(priv->op_flags & OP_ANI_RUNNING))
1291 ath9k_htc_start_ani(priv);
1292
1293 ath_dbg(common, ATH_DBG_CONFIG,
1294 "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
1295
1142out: 1296out:
1143 ath9k_htc_ps_restore(priv); 1297 ath9k_htc_ps_restore(priv);
1144 mutex_unlock(&priv->mutex); 1298 mutex_unlock(&priv->mutex);
@@ -1156,8 +1310,6 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1156 int ret = 0; 1310 int ret = 0;
1157 u8 cmd_rsp; 1311 u8 cmd_rsp;
1158 1312
1159 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
1160
1161 mutex_lock(&priv->mutex); 1313 mutex_lock(&priv->mutex);
1162 ath9k_htc_ps_wakeup(priv); 1314 ath9k_htc_ps_wakeup(priv);
1163 1315
@@ -1166,10 +1318,27 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1166 hvif.index = avp->index; 1318 hvif.index = avp->index;
1167 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); 1319 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
1168 priv->nvifs--; 1320 priv->nvifs--;
1321 priv->vif_slot &= ~(1 << avp->index);
1169 1322
1170 ath9k_htc_remove_station(priv, vif, NULL); 1323 ath9k_htc_remove_station(priv, vif, NULL);
1171 priv->vif = NULL; 1324 priv->vif = NULL;
1172 1325
1326 DEC_VIF(priv, vif->type);
1327 ath9k_htc_set_opmode(priv);
1328
1329 /*
1330 * Stop ANI only if there are no associated station interfaces.
1331 */
1332 if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
1333 priv->rearm_ani = false;
1334 ieee80211_iterate_active_interfaces_atomic(priv->hw,
1335 ath9k_htc_vif_iter, priv);
1336 if (!priv->rearm_ani)
1337 ath9k_htc_stop_ani(priv);
1338 }
1339
1340 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
1341
1173 ath9k_htc_ps_restore(priv); 1342 ath9k_htc_ps_restore(priv);
1174 mutex_unlock(&priv->mutex); 1343 mutex_unlock(&priv->mutex);
1175} 1344}
@@ -1205,13 +1374,11 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1205 * IEEE80211_CONF_CHANGE_CHANNEL is handled. 1374 * IEEE80211_CONF_CHANGE_CHANNEL is handled.
1206 */ 1375 */
1207 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1376 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1208 if (conf->flags & IEEE80211_CONF_MONITOR) { 1377 if ((conf->flags & IEEE80211_CONF_MONITOR) &&
1209 if (ath9k_htc_add_monitor_interface(priv)) 1378 !priv->ah->is_monitoring)
1210 ath_err(common, "Failed to set monitor mode\n"); 1379 ath9k_htc_add_monitor_interface(priv);
1211 else 1380 else if (priv->ah->is_monitoring)
1212 ath_dbg(common, ATH_DBG_CONFIG, 1381 ath9k_htc_remove_monitor_interface(priv);
1213 "HW opmode set to Monitor mode\n");
1214 }
1215 } 1382 }
1216 1383
1217 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1384 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -1434,66 +1601,81 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1434 struct ath9k_htc_priv *priv = hw->priv; 1601 struct ath9k_htc_priv *priv = hw->priv;
1435 struct ath_hw *ah = priv->ah; 1602 struct ath_hw *ah = priv->ah;
1436 struct ath_common *common = ath9k_hw_common(ah); 1603 struct ath_common *common = ath9k_hw_common(ah);
1604 bool set_assoc;
1437 1605
1438 mutex_lock(&priv->mutex); 1606 mutex_lock(&priv->mutex);
1439 ath9k_htc_ps_wakeup(priv); 1607 ath9k_htc_ps_wakeup(priv);
1440 1608
1609 /*
1610 * Set the HW AID/BSSID only for the first station interface
1611 * or in IBSS mode.
1612 */
1613 set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
1614 ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
1615 (priv->num_sta_vif == 1)));
1616
1617
1441 if (changed & BSS_CHANGED_ASSOC) { 1618 if (changed & BSS_CHANGED_ASSOC) {
1442 common->curaid = bss_conf->assoc ? 1619 if (set_assoc) {
1443 bss_conf->aid : 0; 1620 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1444 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 1621 bss_conf->assoc);
1445 bss_conf->assoc); 1622
1446 1623 common->curaid = bss_conf->assoc ?
1447 if (bss_conf->assoc) { 1624 bss_conf->aid : 0;
1448 priv->op_flags |= OP_ASSOCIATED; 1625
1449 ath_start_ani(priv); 1626 if (bss_conf->assoc)
1450 } else { 1627 ath9k_htc_start_ani(priv);
1451 priv->op_flags &= ~OP_ASSOCIATED; 1628 else
1452 cancel_delayed_work_sync(&priv->ath9k_ani_work); 1629 ath9k_htc_stop_ani(priv);
1453 } 1630 }
1454 } 1631 }
1455 1632
1456 if (changed & BSS_CHANGED_BSSID) { 1633 if (changed & BSS_CHANGED_BSSID) {
1457 /* Set BSSID */ 1634 if (set_assoc) {
1458 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1635 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1459 ath9k_hw_write_associd(ah); 1636 ath9k_hw_write_associd(ah);
1460 1637
1461 ath_dbg(common, ATH_DBG_CONFIG, 1638 ath_dbg(common, ATH_DBG_CONFIG,
1462 "BSSID: %pM aid: 0x%x\n", 1639 "BSSID: %pM aid: 0x%x\n",
1463 common->curbssid, common->curaid); 1640 common->curbssid, common->curaid);
1641 }
1464 } 1642 }
1465 1643
1466 if ((changed & BSS_CHANGED_BEACON_INT) || 1644 if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
1467 (changed & BSS_CHANGED_BEACON) || 1645 ath_dbg(common, ATH_DBG_CONFIG,
1468 ((changed & BSS_CHANGED_BEACON_ENABLED) && 1646 "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
1469 bss_conf->enable_beacon)) {
1470 priv->op_flags |= OP_ENABLE_BEACON; 1647 priv->op_flags |= OP_ENABLE_BEACON;
1471 ath9k_htc_beacon_config(priv, vif); 1648 ath9k_htc_beacon_config(priv, vif);
1472 } 1649 }
1473 1650
1474 if ((changed & BSS_CHANGED_BEACON_ENABLED) && 1651 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
1475 !bss_conf->enable_beacon) { 1652 /*
1476 priv->op_flags &= ~OP_ENABLE_BEACON; 1653 * Disable SWBA interrupt only if there are no
1477 ath9k_htc_beacon_config(priv, vif); 1654 * AP/IBSS interfaces.
1478 } 1655 */
1479 1656 if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
1480 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 1657 ath_dbg(common, ATH_DBG_CONFIG,
1481 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 1658 "Beacon disabled for BSS: %pM\n",
1482 bss_conf->use_short_preamble); 1659 bss_conf->bssid);
1483 if (bss_conf->use_short_preamble) 1660 priv->op_flags &= ~OP_ENABLE_BEACON;
1484 priv->op_flags |= OP_PREAMBLE_SHORT; 1661 ath9k_htc_beacon_config(priv, vif);
1485 else 1662 }
1486 priv->op_flags &= ~OP_PREAMBLE_SHORT;
1487 } 1663 }
1488 1664
1489 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 1665 if (changed & BSS_CHANGED_BEACON_INT) {
1490 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", 1666 /*
1491 bss_conf->use_cts_prot); 1667 * Reset the HW TSF for the first AP interface.
1492 if (bss_conf->use_cts_prot && 1668 */
1493 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 1669 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1494 priv->op_flags |= OP_PROTECT_ENABLE; 1670 (priv->nvifs == 1) &&
1495 else 1671 (priv->num_ap_vif == 1) &&
1496 priv->op_flags &= ~OP_PROTECT_ENABLE; 1672 (vif->type == NL80211_IFTYPE_AP)) {
1673 priv->op_flags |= OP_TSF_RESET;
1674 }
1675 ath_dbg(common, ATH_DBG_CONFIG,
1676 "Beacon interval changed for BSS: %pM\n",
1677 bss_conf->bssid);
1678 ath9k_htc_beacon_config(priv, vif);
1497 } 1679 }
1498 1680
1499 if (changed & BSS_CHANGED_ERP_SLOT) { 1681 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1558,6 +1740,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1558 struct ath9k_htc_sta *ista; 1740 struct ath9k_htc_sta *ista;
1559 int ret = 0; 1741 int ret = 0;
1560 1742
1743 mutex_lock(&priv->mutex);
1744
1561 switch (action) { 1745 switch (action) {
1562 case IEEE80211_AMPDU_RX_START: 1746 case IEEE80211_AMPDU_RX_START:
1563 break; 1747 break;
@@ -1582,6 +1766,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1582 ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n"); 1766 ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
1583 } 1767 }
1584 1768
1769 mutex_unlock(&priv->mutex);
1770
1585 return ret; 1771 return ret;
1586} 1772}
1587 1773
@@ -1594,8 +1780,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1594 priv->op_flags |= OP_SCANNING; 1780 priv->op_flags |= OP_SCANNING;
1595 spin_unlock_bh(&priv->beacon_lock); 1781 spin_unlock_bh(&priv->beacon_lock);
1596 cancel_work_sync(&priv->ps_work); 1782 cancel_work_sync(&priv->ps_work);
1597 if (priv->op_flags & OP_ASSOCIATED) 1783 ath9k_htc_stop_ani(priv);
1598 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1599 mutex_unlock(&priv->mutex); 1784 mutex_unlock(&priv->mutex);
1600} 1785}
1601 1786
@@ -1604,14 +1789,11 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1604 struct ath9k_htc_priv *priv = hw->priv; 1789 struct ath9k_htc_priv *priv = hw->priv;
1605 1790
1606 mutex_lock(&priv->mutex); 1791 mutex_lock(&priv->mutex);
1607 ath9k_htc_ps_wakeup(priv);
1608 spin_lock_bh(&priv->beacon_lock); 1792 spin_lock_bh(&priv->beacon_lock);
1609 priv->op_flags &= ~OP_SCANNING; 1793 priv->op_flags &= ~OP_SCANNING;
1610 spin_unlock_bh(&priv->beacon_lock); 1794 spin_unlock_bh(&priv->beacon_lock);
1611 if (priv->op_flags & OP_ASSOCIATED) { 1795 ath9k_htc_ps_wakeup(priv);
1612 ath9k_htc_beacon_config(priv, priv->vif); 1796 ath9k_htc_vif_reconfig(priv);
1613 ath_start_ani(priv);
1614 }
1615 ath9k_htc_ps_restore(priv); 1797 ath9k_htc_ps_restore(priv);
1616 mutex_unlock(&priv->mutex); 1798 mutex_unlock(&priv->mutex);
1617} 1799}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 7a5ffca21958..4a4f27ba96af 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -84,7 +84,9 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
84 struct ieee80211_hdr *hdr; 84 struct ieee80211_hdr *hdr;
85 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 85 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
86 struct ieee80211_sta *sta = tx_info->control.sta; 86 struct ieee80211_sta *sta = tx_info->control.sta;
87 struct ieee80211_vif *vif = tx_info->control.vif;
87 struct ath9k_htc_sta *ista; 88 struct ath9k_htc_sta *ista;
89 struct ath9k_htc_vif *avp;
88 struct ath9k_htc_tx_ctl tx_ctl; 90 struct ath9k_htc_tx_ctl tx_ctl;
89 enum htc_endpoint_id epid; 91 enum htc_endpoint_id epid;
90 u16 qnum; 92 u16 qnum;
@@ -95,18 +97,31 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
95 hdr = (struct ieee80211_hdr *) skb->data; 97 hdr = (struct ieee80211_hdr *) skb->data;
96 fc = hdr->frame_control; 98 fc = hdr->frame_control;
97 99
98 if (tx_info->control.vif && 100 /*
99 (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv) 101 * Find out on which interface this packet has to be
100 vif_idx = ((struct ath9k_htc_vif *) 102 * sent out.
101 tx_info->control.vif->drv_priv)->index; 103 */
102 else 104 if (vif) {
103 vif_idx = priv->nvifs; 105 avp = (struct ath9k_htc_vif *) vif->drv_priv;
106 vif_idx = avp->index;
107 } else {
108 if (!priv->ah->is_monitoring) {
109 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
110 "VIF is null, but no monitor interface !\n");
111 return -EINVAL;
112 }
104 113
114 vif_idx = priv->mon_vif_idx;
115 }
116
117 /*
118 * Find out which station this packet is destined for.
119 */
105 if (sta) { 120 if (sta) {
106 ista = (struct ath9k_htc_sta *) sta->drv_priv; 121 ista = (struct ath9k_htc_sta *) sta->drv_priv;
107 sta_idx = ista->index; 122 sta_idx = ista->index;
108 } else { 123 } else {
109 sta_idx = 0; 124 sta_idx = priv->vif_sta_pos[vif_idx];
110 } 125 }
111 126
112 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl)); 127 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
@@ -141,7 +156,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
141 156
142 /* CTS-to-self */ 157 /* CTS-to-self */
143 if (!(flags & ATH9K_HTC_TX_RTSCTS) && 158 if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
144 (priv->op_flags & OP_PROTECT_ENABLE)) 159 (vif && vif->bss_conf.use_cts_prot))
145 flags |= ATH9K_HTC_TX_CTSONLY; 160 flags |= ATH9K_HTC_TX_CTSONLY;
146 161
147 tx_hdr.flags = cpu_to_be32(flags); 162 tx_hdr.flags = cpu_to_be32(flags);
@@ -217,6 +232,7 @@ static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
217void ath9k_tx_tasklet(unsigned long data) 232void ath9k_tx_tasklet(unsigned long data)
218{ 233{
219 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 234 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
235 struct ieee80211_vif *vif;
220 struct ieee80211_sta *sta; 236 struct ieee80211_sta *sta;
221 struct ieee80211_hdr *hdr; 237 struct ieee80211_hdr *hdr;
222 struct ieee80211_tx_info *tx_info; 238 struct ieee80211_tx_info *tx_info;
@@ -228,12 +244,16 @@ void ath9k_tx_tasklet(unsigned long data)
228 hdr = (struct ieee80211_hdr *) skb->data; 244 hdr = (struct ieee80211_hdr *) skb->data;
229 fc = hdr->frame_control; 245 fc = hdr->frame_control;
230 tx_info = IEEE80211_SKB_CB(skb); 246 tx_info = IEEE80211_SKB_CB(skb);
247 vif = tx_info->control.vif;
231 248
232 memset(&tx_info->status, 0, sizeof(tx_info->status)); 249 memset(&tx_info->status, 0, sizeof(tx_info->status));
233 250
251 if (!vif)
252 goto send_mac80211;
253
234 rcu_read_lock(); 254 rcu_read_lock();
235 255
236 sta = ieee80211_find_sta(priv->vif, hdr->addr1); 256 sta = ieee80211_find_sta(vif, hdr->addr1);
237 if (!sta) { 257 if (!sta) {
238 rcu_read_unlock(); 258 rcu_read_unlock();
239 ieee80211_tx_status(priv->hw, skb); 259 ieee80211_tx_status(priv->hw, skb);
@@ -263,6 +283,7 @@ void ath9k_tx_tasklet(unsigned long data)
263 283
264 rcu_read_unlock(); 284 rcu_read_unlock();
265 285
286 send_mac80211:
266 /* Send status to mac80211 */ 287 /* Send status to mac80211 */
267 ieee80211_tx_status(priv->hw, skb); 288 ieee80211_tx_status(priv->hw, skb);
268 } 289 }
@@ -386,7 +407,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
386 */ 407 */
387 if (((ah->opmode != NL80211_IFTYPE_AP) && 408 if (((ah->opmode != NL80211_IFTYPE_AP) &&
388 (priv->rxfilter & FIF_PROMISC_IN_BSS)) || 409 (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
389 (ah->opmode == NL80211_IFTYPE_MONITOR)) 410 ah->is_monitoring)
390 rfilt |= ATH9K_RX_FILTER_PROM; 411 rfilt |= ATH9K_RX_FILTER_PROM;
391 412
392 if (priv->rxfilter & FIF_CONTROL) 413 if (priv->rxfilter & FIF_CONTROL)
@@ -398,8 +419,13 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
398 else 419 else
399 rfilt |= ATH9K_RX_FILTER_BEACON; 420 rfilt |= ATH9K_RX_FILTER_BEACON;
400 421
401 if (conf_is_ht(&priv->hw->conf)) 422 if (conf_is_ht(&priv->hw->conf)) {
402 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 423 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
424 rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR;
425 }
426
427 if (priv->rxfilter & FIF_PSPOLL)
428 rfilt |= ATH9K_RX_FILTER_PSPOLL;
403 429
404 return rfilt; 430 return rfilt;
405 431
@@ -412,20 +438,12 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
412static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv) 438static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
413{ 439{
414 struct ath_hw *ah = priv->ah; 440 struct ath_hw *ah = priv->ah;
415 struct ath_common *common = ath9k_hw_common(ah);
416
417 u32 rfilt, mfilt[2]; 441 u32 rfilt, mfilt[2];
418 442
419 /* configure rx filter */ 443 /* configure rx filter */
420 rfilt = ath9k_htc_calcrxfilter(priv); 444 rfilt = ath9k_htc_calcrxfilter(priv);
421 ath9k_hw_setrxfilter(ah, rfilt); 445 ath9k_hw_setrxfilter(ah, rfilt);
422 446
423 /* configure bssid mask */
424 ath_hw_setbssidmask(common);
425
426 /* configure operational mode */
427 ath9k_hw_setopmode(ah);
428
429 /* calculate and install multicast filter */ 447 /* calculate and install multicast filter */
430 mfilt[0] = mfilt[1] = ~0; 448 mfilt[0] = mfilt[1] = ~0;
431 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 449 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -576,31 +594,29 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
576 ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate, 594 ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
577 rxbuf->rxstatus.rs_flags); 595 rxbuf->rxstatus.rs_flags);
578 596
579 if (priv->op_flags & OP_ASSOCIATED) { 597 if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
580 if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD && 598 !rxbuf->rxstatus.rs_moreaggr)
581 !rxbuf->rxstatus.rs_moreaggr) 599 ATH_RSSI_LPF(priv->rx.last_rssi,
582 ATH_RSSI_LPF(priv->rx.last_rssi, 600 rxbuf->rxstatus.rs_rssi);
583 rxbuf->rxstatus.rs_rssi);
584 601
585 last_rssi = priv->rx.last_rssi; 602 last_rssi = priv->rx.last_rssi;
586 603
587 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 604 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
588 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi, 605 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
589 ATH_RSSI_EP_MULTIPLIER); 606 ATH_RSSI_EP_MULTIPLIER);
590 607
591 if (rxbuf->rxstatus.rs_rssi < 0) 608 if (rxbuf->rxstatus.rs_rssi < 0)
592 rxbuf->rxstatus.rs_rssi = 0; 609 rxbuf->rxstatus.rs_rssi = 0;
593 610
594 if (ieee80211_is_beacon(fc)) 611 if (ieee80211_is_beacon(fc))
595 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; 612 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
596 }
597 613
598 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); 614 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
599 rx_status->band = hw->conf.channel->band; 615 rx_status->band = hw->conf.channel->band;
600 rx_status->freq = hw->conf.channel->center_freq; 616 rx_status->freq = hw->conf.channel->center_freq;
601 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR; 617 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
602 rx_status->antenna = rxbuf->rxstatus.rs_antenna; 618 rx_status->antenna = rxbuf->rxstatus.rs_antenna;
603 rx_status->flag |= RX_FLAG_TSFT; 619 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
604 620
605 return true; 621 return true;
606 622
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f66c882a39e2..79aec983279f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -140,6 +140,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
140 RATE(540, 0x0c, 0), 140 RATE(540, 0x0c, 0),
141}; 141};
142 142
143#ifdef CONFIG_MAC80211_LEDS
144static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
145 { .throughput = 0 * 1024, .blink_time = 334 },
146 { .throughput = 1 * 1024, .blink_time = 260 },
147 { .throughput = 5 * 1024, .blink_time = 220 },
148 { .throughput = 10 * 1024, .blink_time = 190 },
149 { .throughput = 20 * 1024, .blink_time = 170 },
150 { .throughput = 50 * 1024, .blink_time = 150 },
151 { .throughput = 70 * 1024, .blink_time = 130 },
152 { .throughput = 100 * 1024, .blink_time = 110 },
153 { .throughput = 200 * 1024, .blink_time = 80 },
154 { .throughput = 300 * 1024, .blink_time = 50 },
155};
156#endif
157
143static void ath9k_deinit_softc(struct ath_softc *sc); 158static void ath9k_deinit_softc(struct ath_softc *sc);
144 159
145/* 160/*
@@ -731,6 +746,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
731 746
732 ath9k_init_txpower_limits(sc); 747 ath9k_init_txpower_limits(sc);
733 748
749#ifdef CONFIG_MAC80211_LEDS
750 /* must be initialized before ieee80211_register_hw */
751 sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
752 IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
753 ARRAY_SIZE(ath9k_tpt_blink));
754#endif
755
734 /* Register with mac80211 */ 756 /* Register with mac80211 */
735 error = ieee80211_register_hw(hw); 757 error = ieee80211_register_hw(hw);
736 if (error) 758 if (error)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index c75d40fb86f1..5efc869d65ff 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -891,7 +891,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
891 struct ath_common *common = ath9k_hw_common(ah); 891 struct ath_common *common = ath9k_hw_common(ah);
892 892
893 if (!(ints & ATH9K_INT_GLOBAL)) 893 if (!(ints & ATH9K_INT_GLOBAL))
894 ath9k_hw_enable_interrupts(ah); 894 ath9k_hw_disable_interrupts(ah);
895 895
896 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 896 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
897 897
@@ -969,7 +969,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
969 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 969 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
970 } 970 }
971 971
972 ath9k_hw_enable_interrupts(ah); 972 if (ints & ATH9K_INT_GLOBAL)
973 ath9k_hw_enable_interrupts(ah);
973 974
974 return; 975 return;
975} 976}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a71550049d84..2e228aada1a9 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -910,6 +910,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
910 ath9k_hw_set_gpio(ah, ah->led_pin, 0); 910 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
911 911
912 ieee80211_wake_queues(hw); 912 ieee80211_wake_queues(hw);
913 ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);
914
913out: 915out:
914 spin_unlock_bh(&sc->sc_pcu_lock); 916 spin_unlock_bh(&sc->sc_pcu_lock);
915 917
@@ -923,6 +925,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
923 int r; 925 int r;
924 926
925 ath9k_ps_wakeup(sc); 927 ath9k_ps_wakeup(sc);
928 cancel_delayed_work_sync(&sc->hw_pll_work);
929
926 spin_lock_bh(&sc->sc_pcu_lock); 930 spin_lock_bh(&sc->sc_pcu_lock);
927 931
928 ieee80211_stop_queues(hw); 932 ieee80211_stop_queues(hw);
@@ -1142,8 +1146,7 @@ mutex_unlock:
1142 return r; 1146 return r;
1143} 1147}
1144 1148
1145static int ath9k_tx(struct ieee80211_hw *hw, 1149static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1146 struct sk_buff *skb)
1147{ 1150{
1148 struct ath_softc *sc = hw->priv; 1151 struct ath_softc *sc = hw->priv;
1149 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1152 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1200,10 +1203,9 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1200 goto exit; 1203 goto exit;
1201 } 1204 }
1202 1205
1203 return 0; 1206 return;
1204exit: 1207exit:
1205 dev_kfree_skb_any(skb); 1208 dev_kfree_skb_any(skb);
1206 return 0;
1207} 1209}
1208 1210
1209static void ath9k_stop(struct ieee80211_hw *hw) 1211static void ath9k_stop(struct ieee80211_hw *hw)
@@ -1214,9 +1216,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1214 1216
1215 mutex_lock(&sc->mutex); 1217 mutex_lock(&sc->mutex);
1216 1218
1217 if (led_blink)
1218 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1219
1220 cancel_delayed_work_sync(&sc->tx_complete_work); 1219 cancel_delayed_work_sync(&sc->tx_complete_work);
1221 cancel_delayed_work_sync(&sc->hw_pll_work); 1220 cancel_delayed_work_sync(&sc->hw_pll_work);
1222 cancel_work_sync(&sc->paprd_work); 1221 cancel_work_sync(&sc->paprd_work);
@@ -2131,7 +2130,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2131{ 2130{
2132#define ATH_FLUSH_TIMEOUT 60 /* ms */ 2131#define ATH_FLUSH_TIMEOUT 60 /* ms */
2133 struct ath_softc *sc = hw->priv; 2132 struct ath_softc *sc = hw->priv;
2134 struct ath_txq *txq; 2133 struct ath_txq *txq = NULL;
2135 struct ath_hw *ah = sc->sc_ah; 2134 struct ath_hw *ah = sc->sc_ah;
2136 struct ath_common *common = ath9k_hw_common(ah); 2135 struct ath_common *common = ath9k_hw_common(ah);
2137 int i, j, npend = 0; 2136 int i, j, npend = 0;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index daf171d2f610..cb559e345b86 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -983,7 +983,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
983 rx_status->freq = hw->conf.channel->center_freq; 983 rx_status->freq = hw->conf.channel->center_freq;
984 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; 984 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
985 rx_status->antenna = rx_stats->rs_antenna; 985 rx_status->antenna = rx_stats->rs_antenna;
986 rx_status->flag |= RX_FLAG_TSFT; 986 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
987 987
988 return 0; 988 return 0;
989} 989}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 64b226a78b2e..8fa8acfde62e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -878,6 +878,7 @@
878enum ath_usb_dev { 878enum ath_usb_dev {
879 AR9280_USB = 1, /* AR7010 + AR9280, UB94 */ 879 AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
880 AR9287_USB = 2, /* AR7010 + AR9287, UB95 */ 880 AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
881 STORAGE_DEVICE = 3,
881}; 882};
882 883
883#define AR_DEVID_7010(_ah) \ 884#define AR_DEVID_7010(_ah) \
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index dc862f5e1162..d3d24904f62f 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -123,12 +123,8 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
123void ath9k_swba_tasklet(unsigned long data) 123void ath9k_swba_tasklet(unsigned long data)
124{ 124{
125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
126 struct ath_common *common = ath9k_hw_common(priv->ah);
127
128 ath_dbg(common, ATH_DBG_WMI, "SWBA Event received\n");
129 126
130 ath9k_htc_swba(priv, priv->wmi->beacon_pending); 127 ath9k_htc_swba(priv, priv->wmi->beacon_pending);
131
132} 128}
133 129
134void ath9k_fatal_work(struct work_struct *work) 130void ath9k_fatal_work(struct work_struct *work)
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 420d437f9580..c6a5fae634a0 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -534,7 +534,7 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
534void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); 534void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
535 535
536/* TX */ 536/* TX */
537int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 537void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
538void carl9170_tx_janitor(struct work_struct *work); 538void carl9170_tx_janitor(struct work_struct *work);
539void carl9170_tx_process_status(struct ar9170 *ar, 539void carl9170_tx_process_status(struct ar9170 *ar,
540 const struct carl9170_rsp *cmd); 540 const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6f41e21d3a1c..0ef70b6fc512 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1339,7 +1339,7 @@ err_unlock_rcu:
1339 return false; 1339 return false;
1340} 1340}
1341 1341
1342int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1342void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1343{ 1343{
1344 struct ar9170 *ar = hw->priv; 1344 struct ar9170 *ar = hw->priv;
1345 struct ieee80211_tx_info *info; 1345 struct ieee80211_tx_info *info;
@@ -1373,12 +1373,11 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1373 } 1373 }
1374 1374
1375 carl9170_tx(ar); 1375 carl9170_tx(ar);
1376 return NETDEV_TX_OK; 1376 return;
1377 1377
1378err_free: 1378err_free:
1379 ar->tx_dropped++; 1379 ar->tx_dropped++;
1380 dev_kfree_skb_any(skb); 1380 dev_kfree_skb_any(skb);
1381 return NETDEV_TX_OK;
1382} 1381}
1383 1382
1384void carl9170_tx_scheduler(struct ar9170 *ar) 1383void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e5964f..f82c400be288 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
118 { USB_DEVICE(0x057c, 0x8402) }, 118 { USB_DEVICE(0x057c, 0x8402) },
119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ 119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
120 { USB_DEVICE(0x1668, 0x1200) }, 120 { USB_DEVICE(0x1668, 0x1200) },
121 /* Airlive X.USB a/b/g/n */
122 { USB_DEVICE(0x1b75, 0x9170) },
121 123
122 /* terminate */ 124 /* terminate */
123 {} 125 {}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 22bc9f17f634..57eb5b649730 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3203,7 +3203,7 @@ static void b43_tx_work(struct work_struct *work)
3203 mutex_unlock(&wl->mutex); 3203 mutex_unlock(&wl->mutex);
3204} 3204}
3205 3205
3206static int b43_op_tx(struct ieee80211_hw *hw, 3206static void b43_op_tx(struct ieee80211_hw *hw,
3207 struct sk_buff *skb) 3207 struct sk_buff *skb)
3208{ 3208{
3209 struct b43_wl *wl = hw_to_b43_wl(hw); 3209 struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3211,14 +3211,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
3211 if (unlikely(skb->len < 2 + 2 + 6)) { 3211 if (unlikely(skb->len < 2 + 2 + 6)) {
3212 /* Too short, this can't be a valid frame. */ 3212 /* Too short, this can't be a valid frame. */
3213 dev_kfree_skb_any(skb); 3213 dev_kfree_skb_any(skb);
3214 return NETDEV_TX_OK; 3214 return;
3215 } 3215 }
3216 B43_WARN_ON(skb_shinfo(skb)->nr_frags); 3216 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
3217 3217
3218 skb_queue_tail(&wl->tx_queue, skb); 3218 skb_queue_tail(&wl->tx_queue, skb);
3219 ieee80211_queue_work(wl->hw, &wl->tx_work); 3219 ieee80211_queue_work(wl->hw, &wl->tx_work);
3220
3221 return NETDEV_TX_OK;
3222} 3220}
3223 3221
3224static void b43_qos_params_upload(struct b43_wldev *dev, 3222static void b43_qos_params_upload(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index ab81ed8b19d7..9f5a3c993239 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -430,9 +430,9 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
430 bool workaround = false; 430 bool workaround = false;
431 431
432 if (sprom->revision < 4) 432 if (sprom->revision < 4)
433 workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM || 433 workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
434 binfo->type != 0x46D || 434 binfo->type == 0x46D &&
435 binfo->rev < 0x41); 435 binfo->rev >= 0x41);
436 else 436 else
437 workaround = 437 workaround =
438 !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS); 438 !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
@@ -1281,17 +1281,17 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
1281 B43_NPHY_TABLE_DATALO, tmp); 1281 B43_NPHY_TABLE_DATALO, tmp);
1282 } 1282 }
1283 } 1283 }
1284 }
1284 1285
1285 b43_nphy_set_rf_sequence(dev, 5, 1286 b43_nphy_set_rf_sequence(dev, 5,
1286 rfseq_events, rfseq_delays, 3); 1287 rfseq_events, rfseq_delays, 3);
1287 b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, 1288 b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
1288 ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, 1289 ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
1289 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); 1290 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
1290 1291
1291 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 1292 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
1292 b43_phy_maskset(dev, B43_PHY_N(0xC5D), 1293 b43_phy_maskset(dev, B43_PHY_N(0xC5D),
1293 0xFF80, 4); 1294 0xFF80, 4);
1294 }
1295 } 1295 }
1296} 1296}
1297 1297
@@ -2128,7 +2128,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
2128 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); 2128 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
2129 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); 2129 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
2130 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); 2130 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
2131 } else if (dev->phy.rev == 2) { 2131 } else {
2132 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); 2132 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
2133 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); 2133 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
2134 save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); 2134 save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
@@ -2179,7 +2179,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
2179 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); 2179 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
2180 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); 2180 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
2181 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); 2181 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
2182 } else if (dev->phy.rev == 2) { 2182 } else {
2183 b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); 2183 b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
2184 b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); 2184 b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
2185 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]); 2185 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index dc8ef09a8552..c42b2acea24e 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1097,6 +1097,1080 @@ static const u32 b43_ntab_tmap[] = {
1097 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1097 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1098}; 1098};
1099 1099
1100/* static tables, PHY revision >= 3 */
1101static const u32 b43_ntab_framestruct_r3[] = {
1102 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
1103 0x09804506, 0x00100030, 0x09804507, 0x00100030,
1104 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1105 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1106 0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024,
1107 0x0980450e, 0x00100034, 0x0980450f, 0x00100034,
1108 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1109 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1110 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
1111 0x1980c506, 0x00100030, 0x21810506, 0x00100030,
1112 0x21810506, 0x00100030, 0x01800504, 0x00100030,
1113 0x11808505, 0x00100030, 0x29814507, 0x01100030,
1114 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
1115 0x21810506, 0x00100030, 0x21810506, 0x00100030,
1116 0x29814507, 0x01100030, 0x00000000, 0x00000000,
1117 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1118 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
1119 0x1980c50e, 0x00100038, 0x2181050e, 0x00100038,
1120 0x2181050e, 0x00100038, 0x0180050c, 0x00100038,
1121 0x1180850d, 0x00100038, 0x2981450f, 0x01100038,
1122 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
1123 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
1124 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
1125 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1126 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
1127 0x1980c506, 0x00100030, 0x1980c506, 0x00100030,
1128 0x11808504, 0x00100030, 0x3981ca05, 0x00100030,
1129 0x29814507, 0x01100030, 0x00000000, 0x00000000,
1130 0x10008a04, 0x00100000, 0x3981ca05, 0x00100030,
1131 0x1980c506, 0x00100030, 0x29814507, 0x01100030,
1132 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1133 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1134 0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028,
1135 0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038,
1136 0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038,
1137 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
1138 0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038,
1139 0x1980c50e, 0x00100038, 0x2981450f, 0x01100038,
1140 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1141 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1142 0x40021404, 0x00100000, 0x02001405, 0x00100040,
1143 0x0b004a06, 0x01900060, 0x13008a06, 0x01900060,
1144 0x13008a06, 0x01900060, 0x43020a04, 0x00100060,
1145 0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060,
1146 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
1147 0x13008a06, 0x01900060, 0x13008a06, 0x01900060,
1148 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
1149 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1150 0x4002140c, 0x00100010, 0x0200140d, 0x00100050,
1151 0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070,
1152 0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070,
1153 0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070,
1154 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1155 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
1156 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
1157 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1158 0x50029404, 0x00100000, 0x32019405, 0x00100040,
1159 0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060,
1160 0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060,
1161 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
1162 0x5802d404, 0x00100000, 0x3b01d405, 0x00100060,
1163 0x0b004a06, 0x01900060, 0x23010a07, 0x01500060,
1164 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1165 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1166 0x5002940c, 0x00100010, 0x3201940d, 0x00100050,
1167 0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070,
1168 0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070,
1169 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
1170 0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070,
1171 0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070,
1172 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1173 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1174 0x40021404, 0x000f4800, 0x62031405, 0x00100040,
1175 0x53028a06, 0x01900060, 0x53028a07, 0x01900060,
1176 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1177 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1178 0x4002140c, 0x000f4808, 0x6203140d, 0x00100048,
1179 0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068,
1180 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1181 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1182 0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024,
1183 0x1980c50e, 0x00100034, 0x2181050e, 0x00100034,
1184 0x2181050e, 0x00100034, 0x0180050c, 0x00100038,
1185 0x1180850d, 0x00100038, 0x1181850d, 0x00100038,
1186 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
1187 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1188 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1189 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1190 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
1191 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
1192 0x1181850d, 0x00100038, 0x2981450f, 0x01100038,
1193 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1194 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1195 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1196 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1197 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1198 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
1199 0x0180c506, 0x00100030, 0x0180c506, 0x00100030,
1200 0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130,
1201 0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030,
1202 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1205 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1206 0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130,
1207 0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130,
1208 0x2981450f, 0x01100030, 0x00000000, 0x00000000,
1209 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1210 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1211 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1212 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1213 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1214 0x4002140c, 0x00100008, 0x0200140d, 0x00100048,
1215 0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068,
1216 0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070,
1217 0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070,
1218 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
1219 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1220 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1221 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1222 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1223 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
1224 0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070,
1225 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1226 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1227 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1228 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1229 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1230 0x50029404, 0x00100000, 0x32019405, 0x00100040,
1231 0x03004a06, 0x01900060, 0x03004a06, 0x01900060,
1232 0x6b030a0c, 0x00100060, 0x4b02140d, 0x0016a160,
1233 0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060,
1234 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1235 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1236 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1237 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1238 0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160,
1239 0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160,
1240 0x23010a0f, 0x01500060, 0x00000000, 0x00000000,
1241 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1242 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1243 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1244 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1245 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1246 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
1247 0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060,
1248 0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060,
1249 0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060,
1250 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1253 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1254 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1255 0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070,
1256 0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070,
1257 0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070,
1258 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1259 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1260 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1261 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1262 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
1263 0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060,
1264 0x53028a07, 0x0190c060, 0x00000000, 0x00000000,
1265 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1266 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1267 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1268 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1269 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1270 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
1271 0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070,
1272 0x53028a0f, 0x0190c070, 0x00000000, 0x00000000,
1273 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1274 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1275 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1276 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1277 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1278 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1279 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1280 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1281 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1282 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1283 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1284 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1285 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1286 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1287 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1288 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1289 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1290 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1291 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1292 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1293 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1294 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1295 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1296 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1297 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1298 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1299 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1300 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1301 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1302 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1303 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1304 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1305 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1306 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1307 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1308 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1309 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1310};
1311
1312static const u16 b43_ntab_pilot_r3[] = {
1313 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08,
1314 0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5,
1315 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82,
1316 0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff,
1317 0xff82, 0xffa0, 0xff28, 0xff0a, 0xffff, 0xffff,
1318 0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5,
1319 0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815,
1320 0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff,
1321 0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02,
1322 0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295,
1323 0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a,
1324 0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff,
1325 0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008,
1326 0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280,
1327 0xf0a0, 0xf028, 0xffff, 0xffff,
1328};
1329
1330static const u32 b43_ntab_tmap_r3[] = {
1331 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1332 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1333 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
1334 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
1335 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
1336 0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1337 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1338 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1339 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
1340 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
1341 0xf1111110, 0x11111111, 0x11f11111, 0x00011111,
1342 0x11110000, 0x1111f111, 0x11111111, 0x111111f1,
1343 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa,
1344 0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
1345 0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88,
1346 0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888,
1347 0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808,
1348 0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08,
1349 0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080,
1350 0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0,
1351 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1352 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1353 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1354 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1355 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1356 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1357 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1358 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1359 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1360 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1361 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1362 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1363 0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
1364 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
1365 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1366 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
1367 0x22000000, 0x2222b222, 0x22222222, 0x222222b2,
1368 0xb2222220, 0x22222222, 0x22d22222, 0x00000222,
1369 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1370 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1371 0x33000000, 0x3333b333, 0x33333333, 0x333333b3,
1372 0xb3333330, 0x33333333, 0x33d33333, 0x00000333,
1373 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
1374 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
1375 0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
1376 0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
1377 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1378 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
1379 0x22222200, 0x2222f222, 0x22222222, 0x222222f2,
1380 0x22222222, 0x22222222, 0x22f22222, 0x00000222,
1381 0x11000000, 0x1111f111, 0x11111111, 0x11111111,
1382 0xf1111111, 0x11111111, 0x11f11111, 0x01111111,
1383 0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b,
1384 0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb,
1385 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
1386 0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa,
1387 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
1388 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa,
1389 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1390 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1391 0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb,
1392 0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999,
1393 0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88,
1394 0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a,
1395 0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b,
1396 0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909,
1397 0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08,
1398 0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a,
1399 0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090,
1400 0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090,
1401 0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080,
1402 0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0,
1403 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
1404 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
1405 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
1406 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
1407 0x33000000, 0x3333f333, 0x33333333, 0x333333f3,
1408 0xf3333330, 0x33333333, 0x33f33333, 0x00000333,
1409 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
1410 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
1411 0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
1412 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
1413 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1414 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1415 0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1416 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1417 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1418 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
1419 0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1420 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1421 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1422 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
1423 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1424 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1425 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
1426 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
1427 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1428 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1429 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
1430 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
1431 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1432 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1433 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1434 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1435 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1436 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1437 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1438 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1439 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1441 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1442 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1443};
1444
1445static const u32 b43_ntab_intlevel_r3[] = {
1446 0x00802070, 0x0671188d, 0x0a60192c, 0x0a300e46,
1447 0x00c1188d, 0x080024d2, 0x00000070,
1448};
1449
1450static const u32 b43_ntab_tdtrn_r3[] = {
1451 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
1452 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
1453 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
1454 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
1455 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
1456 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
1457 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
1458 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
1459 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
1460 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
1461 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
1462 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
1463 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
1464 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
1465 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
1466 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
1467 0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8,
1468 0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6,
1469 0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7,
1470 0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd,
1471 0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9,
1472 0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7,
1473 0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798,
1474 0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895,
1475 0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94,
1476 0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8,
1477 0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87,
1478 0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8,
1479 0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8,
1480 0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a,
1481 0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4,
1482 0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de,
1483 0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a,
1484 0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014,
1485 0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991,
1486 0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498,
1487 0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960,
1488 0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c,
1489 0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d,
1490 0xfe00ff2c, 0xf90afe94, 0xfc96079c, 0x07de0270,
1491 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1492 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1493 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1494 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1495 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1496 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1497 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1498 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1499 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1500 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1501 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1502 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1503 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1504 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1505 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1506 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1507 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1508 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1509 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1511 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1512 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1513 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1514 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1515 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
1516 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
1517 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
1518 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
1519 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
1520 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
1521 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
1522 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
1523 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
1524 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
1525 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
1526 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
1527 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
1528 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
1529 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
1530 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
1531 0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54,
1532 0xf65dfe59, 0x02eef99e, 0x0166f03c, 0xfff809b6,
1533 0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2,
1534 0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3,
1535 0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da,
1536 0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6,
1537 0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12,
1538 0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0,
1539 0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac,
1540 0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a,
1541 0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e,
1542 0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d,
1543 0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826,
1544 0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a,
1545 0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee,
1546 0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30,
1547 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
1548 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
1549 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
1550 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
1551 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
1552 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
1553 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
1554 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
1555 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
1556 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
1557 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
1558 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
1559 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
1560 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
1561 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
1562 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
1563 0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46,
1564 0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0,
1565 0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1,
1566 0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693,
1567 0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341,
1568 0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0,
1569 0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d,
1570 0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6,
1571 0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90,
1572 0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a,
1573 0x003ef896, 0x05e001fa, 0xfa970677, 0xfd2d0039,
1574 0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433,
1575 0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94,
1576 0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a,
1577 0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d,
1578 0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0,
1579 0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157,
1580 0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277,
1581 0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c,
1582 0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8,
1583 0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9,
1584 0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157,
1585 0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52,
1586 0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b,
1587 0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69,
1588 0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374,
1589 0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328,
1590 0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9,
1591 0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457,
1592 0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022,
1593 0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a,
1594 0x057104c7, 0x0157fe68, 0xfddcfc09, 0x0500fce0,
1595 0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68,
1596 0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d,
1597 0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44,
1598 0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d,
1599 0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125,
1600 0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25,
1601 0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c,
1602 0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31,
1603 0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5,
1604 0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438,
1605 0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b,
1606 0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2,
1607 0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313,
1608 0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927,
1609 0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21,
1610 0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500,
1611 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
1612 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
1613 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
1614 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
1615 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
1616 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
1617 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
1618 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
1619 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
1620 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
1621 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
1622 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
1623 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
1624 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
1625 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
1626 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
1627};
1628
1629static const u32 b43_ntab_noisevar0_r3[] = {
1630 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1631 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1632 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1633 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1634 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1635 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1636 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1637 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1638 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1639 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1640 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1641 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1642 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1643 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1644 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1645 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1646 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1647 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1648 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1649 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1650 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1651 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1652 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1653 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1654 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1655 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1656 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1657 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1658 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1659 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1660 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1661 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1662 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1663 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1664 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1665 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1666 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1667 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1668 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1669 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1670 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1671 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1672 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1673 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1674 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1675 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1676 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1677 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1678 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1679 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1680 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1681 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1682 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1683 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1684 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1685 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1686 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1687 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1688 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1689 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1690 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1691 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1692 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1693 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1694};
1695
1696static const u32 b43_ntab_noisevar1_r3[] = {
1697 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1698 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1699 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1700 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1701 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1702 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1703 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1704 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1705 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1706 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1707 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1708 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1709 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1710 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1711 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1712 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1713 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1714 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1715 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1716 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1717 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1718 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1719 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1720 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1721 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1722 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1723 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1724 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1725 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1726 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1727 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1728 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1729 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1730 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1731 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1732 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1733 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1734 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1735 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1736 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1737 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1738 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1739 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1740 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1741 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1742 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1743 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1744 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1745 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1746 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1747 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1748 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1749 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1750 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1751 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1752 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1753 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1754 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1755 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1756 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1757 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1758 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1759 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1760 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
1761};
1762
1763static const u16 b43_ntab_mcs_r3[] = {
1764 0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
1765 0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
1766 0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
1767 0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
1768 0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
1769 0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
1770 0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
1771 0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
1772 0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
1773 0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
1774 0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
1775 0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
1776 0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
1777 0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
1778 0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
1779 0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
1780 0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
1781 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1782 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1783 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1784 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
1785 0x0007, 0x0007,
1786};
1787
1788static const u32 b43_ntab_tdi20a0_r3[] = {
1789 0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0,
1790 0x000d5ab3, 0x000e9cb6, 0x000f9eba, 0x0000c13d,
1791 0x00020301, 0x00030504, 0x00040708, 0x0005090b,
1792 0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718,
1793 0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31,
1794 0x000101b4, 0x000243b7, 0x000345bb, 0x000447be,
1795 0x00058982, 0x00068c05, 0x00099309, 0x000a950c,
1796 0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99,
1797 0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632,
1798 0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf,
1799 0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d,
1800 0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a,
1801 0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00,
1802 0x00000000, 0x00000000, 0x00000000,
1803};
1804
1805static const u32 b43_ntab_tdi20a1_r3[] = {
1806 0x00014b26, 0x00028d29, 0x000393ad, 0x00049630,
1807 0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d,
1808 0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b,
1809 0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418,
1810 0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1,
1811 0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e,
1812 0x000f4702, 0x00008905, 0x00020c09, 0x0003128c,
1813 0x0004148f, 0x00051712, 0x00065916, 0x00091b19,
1814 0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2,
1815 0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf,
1816 0x00035303, 0x00045506, 0x0005978a, 0x0006998d,
1817 0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a,
1818 0x000d8400, 0x000ec600, 0x000fc800, 0x00010a00,
1819 0x00000000, 0x00000000, 0x00000000,
1820};
1821
1822static const u32 b43_ntab_tdi40a0_r3[] = {
1823 0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2,
1824 0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c,
1825 0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2,
1826 0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3,
1827 0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d,
1828 0x00153717, 0x00168320, 0x00180ca9, 0x00199633,
1829 0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4,
1830 0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f,
1831 0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734,
1832 0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5,
1833 0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010,
1834 0x001ef999, 0x00010522, 0x00028eac, 0x00045835,
1835 0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6,
1836 0x000c0f6f, 0x000dd8f9, 0x00126787, 0x00143111,
1837 0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936,
1838 0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7,
1839 0x00031070, 0x000499fa, 0x00062888, 0x0007f212,
1840 0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37,
1841 0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868,
1842 0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313,
1843 0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38,
1844 0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969,
1845 0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414,
1846 0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39,
1847 0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a,
1848 0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515,
1849 0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a,
1850 0x00000000, 0x00000000,
1851};
1852
1853static const u32 b43_ntab_tdi40a1_r3[] = {
1854 0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd,
1855 0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07,
1856 0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d,
1857 0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde,
1858 0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88,
1859 0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e,
1860 0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf,
1861 0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89,
1862 0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f,
1863 0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260,
1864 0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a,
1865 0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0,
1866 0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361,
1867 0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b,
1868 0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1,
1869 0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462,
1870 0x00130deb, 0x00149775, 0x00162603, 0x0017af8c,
1871 0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2,
1872 0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563,
1873 0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d,
1874 0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3,
1875 0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664,
1876 0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f,
1877 0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4,
1878 0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5,
1879 0x001390ee, 0x00151a78, 0x0016a906, 0x00183290,
1880 0x0019bc19, 0x001b4822, 0x001cd12c, 0x001e9ab5,
1881 0x00000000, 0x00000000,
1882};
1883
1884static const u32 b43_ntab_pilotlt_r3[] = {
1885 0x76540213, 0x62407351, 0x76543210, 0x76540213,
1886 0x76540213, 0x76430521,
1887};
1888
1889static const u32 b43_ntab_channelest_r3[] = {
1890 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1891 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1892 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1893 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1894 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1895 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1896 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1897 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1898 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1899 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1900 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1901 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1902 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1903 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1904 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1905 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1906 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1907 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1908 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1909 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1910 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1911 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1912 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1913 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1914};
1915
1916static const u8 b43_ntab_framelookup_r3[] = {
1917 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
1918 0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e,
1919 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a,
1920 0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a,
1921};
1922
1923static const u8 b43_ntab_estimatepowerlt0_r3[] = {
1924 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
1925 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
1926 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
1927 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
1928 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
1929 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
1930 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
1931 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
1932};
1933
1934static const u8 b43_ntab_estimatepowerlt1_r3[] = {
1935 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
1936 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
1937 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
1938 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
1939 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
1940 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
1941 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
1942 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
1943};
1944
1945static const u8 b43_ntab_adjustpower0_r3[] = {
1946 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1947 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1948 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1949 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1950 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1951 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1952 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1953 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1954 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1955 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1956 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1957 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1958 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1959 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1960 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1961 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1962};
1963
1964static const u8 b43_ntab_adjustpower1_r3[] = {
1965 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1966 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1967 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1968 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1969 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1970 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1971 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1972 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1973 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1974 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1975 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1976 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1977 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1978 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1979 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1980 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1981};
1982
1983static const u32 b43_ntab_gainctl0_r3[] = {
1984 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
1985 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
1986 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
1987 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
1988 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
1989 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
1990 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
1991 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
1992 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
1993 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
1994 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
1995 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
1996 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
1997 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
1998 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
1999 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
2000 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
2001 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
2002 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
2003 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
2004 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
2005 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
2006 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
2007 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
2008 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
2009 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
2010 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
2011 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
2012 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
2013 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
2014 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
2015 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
2016};
2017
2018static const u32 b43_ntab_gainctl1_r3[] = {
2019 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
2020 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
2021 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
2022 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
2023 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
2024 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
2025 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
2026 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
2027 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
2028 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
2029 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
2030 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
2031 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
2032 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
2033 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
2034 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
2035 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
2036 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
2037 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
2038 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
2039 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
2040 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
2041 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
2042 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
2043 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
2044 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
2045 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
2046 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
2047 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
2048 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
2049 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
2050 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
2051};
2052
2053static const u32 b43_ntab_iqlt0_r3[] = {
2054 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2055 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2056 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2057 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2058 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2059 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2060 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2061 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2062 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2063 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2064 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2065 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2066 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2067 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2068 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2069 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2070 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2071 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2072 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2073 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2074 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2075 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2076 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2077 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2078 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2079 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2080 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2081 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2082 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2083 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2084 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2085 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2086};
2087
2088static const u32 b43_ntab_iqlt1_r3[] = {
2089 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2090 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2091 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2092 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2093 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2094 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2095 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2096 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2097 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2098 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2099 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2100 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2101 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2102 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2103 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2104 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2105 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2106 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2107 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2108 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2109 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2110 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2111 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2112 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2113 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2114 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2115 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2116 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2117 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2118 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2119 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2120 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2121};
2122
2123static const u16 b43_ntab_loftlt0_r3[] = {
2124 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2125 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2126 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2127 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2128 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2129 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2130 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2131 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2132 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2133 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2134 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2135 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2136 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2137 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2138 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2139 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2140 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2141 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2142 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2143 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2144 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2145 0x0000, 0x0000,
2146};
2147
2148static const u16 b43_ntab_loftlt1_r3[] = {
2149 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2150 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2151 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2152 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2153 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2154 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2155 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2156 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2157 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2158 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2159 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2160 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2161 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2162 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2163 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2164 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2165 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2166 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2167 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2168 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2169 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
2170 0x0000, 0x0000,
2171};
2172
/* TX gain tables */
1100const u32 b43_ntab_tx_gain_rev0_1_2[] = { 2174const u32 b43_ntab_tx_gain_rev0_1_2[] = {
1101 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, 2175 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
1102 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44, 2176 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
@@ -1813,7 +2887,6 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
1813#define ntab_upload(dev, offset, data) do { \ 2887#define ntab_upload(dev, offset, data) do { \
1814 b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \ 2888 b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \
1815 } while (0) 2889 } while (0)
1816
1817void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) 2890void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
1818{ 2891{
1819 /* Static tables */ 2892 /* Static tables */
@@ -1847,10 +2920,39 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
1847 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); 2920 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
1848} 2921}
1849 2922
2923#define ntab_upload_r3(dev, offset, data) do { \
2924 b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
2925 } while (0)
1850void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) 2926void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
1851{ 2927{
1852 /* Static tables */ 2928 /* Static tables */
1853 /* TODO */ 2929 ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
2930 ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
2931 ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
2932 ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
2933 ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
2934 ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
2935 ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
2936 ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
2937 ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
2938 ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
2939 ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
2940 ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
2941 ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
2942 ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
2943 ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
2944 ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
2945 b43_ntab_estimatepowerlt0_r3);
2946 ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
2947 b43_ntab_estimatepowerlt1_r3);
2948 ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
2949 ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
2950 ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
2951 ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
2952 ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
2953 ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
2954 ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
2955 ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
1854 2956
1855 /* Volatile tables */ 2957 /* Volatile tables */
1856 /* TODO */ 2958 /* TODO */
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4ec593ba3eef..016a480b2dc6 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -109,6 +109,33 @@ b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
109#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ 109#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
110#define B43_NTAB_C1_LOFEEDTH_SIZE 128 110#define B43_NTAB_C1_LOFEEDTH_SIZE 128
111 111
112/* Static N-PHY tables, PHY revision >= 3 */
113#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
114#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
115#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
116#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
117#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
118#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
119#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
120#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
121#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
122#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
123#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
124#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
125#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
126#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
127#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
128#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
129#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
130#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
131#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
132#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
133#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
134#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
135#define B43_NTAB_C1_IQLT_R3 B43_NTAB32(27, 320) /* I/Q lookup 1 */
136#define B43_NTAB_C0_LOFEEDTH_R3 B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0 */
137#define B43_NTAB_C1_LOFEEDTH_R3 B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
138
112#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18 139#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
113#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18 140#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
114#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18 141#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index e6b0528f3b52..e5be381c17bc 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -32,6 +32,36 @@
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
34 34
35static const struct b43_tx_legacy_rate_phy_ctl_entry b43_tx_legacy_rate_phy_ctl[] = {
36 { B43_CCK_RATE_1MB, 0x0, 0x0 },
37 { B43_CCK_RATE_2MB, 0x0, 0x1 },
38 { B43_CCK_RATE_5MB, 0x0, 0x2 },
39 { B43_CCK_RATE_11MB, 0x0, 0x3 },
40 { B43_OFDM_RATE_6MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_BPSK },
41 { B43_OFDM_RATE_9MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_BPSK },
42 { B43_OFDM_RATE_12MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QPSK },
43 { B43_OFDM_RATE_18MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QPSK },
44 { B43_OFDM_RATE_24MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QAM16 },
45 { B43_OFDM_RATE_36MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM16 },
46 { B43_OFDM_RATE_48MB, B43_TXH_PHY1_CRATE_2_3, B43_TXH_PHY1_MODUL_QAM64 },
47 { B43_OFDM_RATE_54MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM64 },
48};
49
50static const struct b43_tx_legacy_rate_phy_ctl_entry *
51b43_tx_legacy_rate_phy_ctl_ent(u8 bitrate)
52{
53 const struct b43_tx_legacy_rate_phy_ctl_entry *e;
54 unsigned int i;
55
56 for (i = 0; i < ARRAY_SIZE(b43_tx_legacy_rate_phy_ctl); i++) {
57 e = &(b43_tx_legacy_rate_phy_ctl[i]);
58 if (e->bitrate == bitrate)
59 return e;
60 }
61
62 B43_WARN_ON(1);
63 return NULL;
64}
35 65
36/* Extract the bitrate index out of a CCK PLCP header. */ 66/* Extract the bitrate index out of a CCK PLCP header. */
37static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp) 67static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
@@ -145,6 +175,34 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
145 } 175 }
146} 176}
147 177
178static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
179{
180 const struct b43_phy *phy = &dev->phy;
181 const struct b43_tx_legacy_rate_phy_ctl_entry *e;
182 u16 control = 0;
183 u16 bw;
184
185 if (phy->type == B43_PHYTYPE_LP)
186 bw = B43_TXH_PHY1_BW_20;
187 else /* FIXME */
188 bw = B43_TXH_PHY1_BW_20;
189
190 if (0) { /* FIXME: MIMO */
191 } else if (b43_is_cck_rate(bitrate) && phy->type != B43_PHYTYPE_LP) {
192 control = bw;
193 } else {
194 control = bw;
195 e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
196 if (e) {
197 control |= e->coding_rate;
198 control |= e->modulation;
199 }
200 control |= B43_TXH_PHY1_MODE_SISO;
201 }
202
203 return control;
204}
205
148static u8 b43_calc_fallback_rate(u8 bitrate) 206static u8 b43_calc_fallback_rate(u8 bitrate)
149{ 207{
150 switch (bitrate) { 208 switch (bitrate) {
@@ -437,6 +495,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
437 extra_ft |= B43_TXH_EFT_RTSFB_OFDM; 495 extra_ft |= B43_TXH_EFT_RTSFB_OFDM;
438 else 496 else
439 extra_ft |= B43_TXH_EFT_RTSFB_CCK; 497 extra_ft |= B43_TXH_EFT_RTSFB_CCK;
498
499 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
500 phy->type == B43_PHYTYPE_N) {
501 txhdr->phy_ctl1_rts = cpu_to_le16(
502 b43_generate_tx_phy_ctl1(dev, rts_rate));
503 txhdr->phy_ctl1_rts_fb = cpu_to_le16(
504 b43_generate_tx_phy_ctl1(dev, rts_rate_fb));
505 }
440 } 506 }
441 507
442 /* Magic cookie */ 508 /* Magic cookie */
@@ -445,6 +511,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
445 else 511 else
446 txhdr->new_format.cookie = cpu_to_le16(cookie); 512 txhdr->new_format.cookie = cpu_to_le16(cookie);
447 513
514 if (phy->type == B43_PHYTYPE_N) {
515 txhdr->phy_ctl1 =
516 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
517 txhdr->phy_ctl1_fb =
518 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate_fb));
519 }
520
448 /* Apply the bitfields */ 521 /* Apply the bitfields */
449 txhdr->mac_ctl = cpu_to_le32(mac_ctl); 522 txhdr->mac_ctl = cpu_to_le32(mac_ctl);
450 txhdr->phy_ctl = cpu_to_le16(phy_ctl); 523 txhdr->phy_ctl = cpu_to_le16(phy_ctl);
@@ -652,7 +725,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
652 status.mactime += mactime; 725 status.mactime += mactime;
653 if (low_mactime_now <= mactime) 726 if (low_mactime_now <= mactime)
654 status.mactime -= 0x10000; 727 status.mactime -= 0x10000;
655 status.flag |= RX_FLAG_TSFT; 728 status.flag |= RX_FLAG_MACTIME_MPDU;
656 } 729 }
657 730
658 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; 731 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d4cf9b390af3..42debb5cd6fa 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -73,6 +73,12 @@ struct b43_txhdr {
73 } __packed; 73 } __packed;
74} __packed; 74} __packed;
75 75
76struct b43_tx_legacy_rate_phy_ctl_entry {
77 u8 bitrate;
78 u16 coding_rate;
79 u16 modulation;
80};
81
76/* MAC TX control */ 82/* MAC TX control */
77#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ 83#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */
78#define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */ 84#define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1f11e1670bf0..c7fd73e3ad76 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2442,8 +2442,8 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
2442 return err; 2442 return err;
2443} 2443}
2444 2444
2445static int b43legacy_op_tx(struct ieee80211_hw *hw, 2445static void b43legacy_op_tx(struct ieee80211_hw *hw,
2446 struct sk_buff *skb) 2446 struct sk_buff *skb)
2447{ 2447{
2448 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2448 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
2449 struct b43legacy_wldev *dev = wl->current_dev; 2449 struct b43legacy_wldev *dev = wl->current_dev;
@@ -2466,7 +2466,6 @@ out:
2466 /* Drop the packet. */ 2466 /* Drop the packet. */
2467 dev_kfree_skb_any(skb); 2467 dev_kfree_skb_any(skb);
2468 } 2468 }
2469 return NETDEV_TX_OK;
2470} 2469}
2471 2470
2472static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue, 2471static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 7d177d97f1f7..3a95541708a6 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -572,7 +572,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
572 status.mactime += mactime; 572 status.mactime += mactime;
573 if (low_mactime_now <= mactime) 573 if (low_mactime_now <= mactime)
574 status.mactime -= 0x10000; 574 status.mactime -= 0x10000;
575 status.flag |= RX_FLAG_TSFT; 575 status.flag |= RX_FLAG_MACTIME_MPDU;
576 } 576 }
577 577
578 chanid = (chanstat & B43legacy_RX_CHAN_ID) >> 578 chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644
index 000000000000..2a45dd44cc12
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -0,0 +1,116 @@
1config IWLWIFI_LEGACY
2 tristate "Intel Wireless Wifi legacy devices"
3 depends on PCI && MAC80211
4 select FW_LOADER
5 select NEW_LEDS
6 select LEDS_CLASS
7 select LEDS_TRIGGERS
8 select MAC80211_LEDS
9
10menu "Debugging Options"
11 depends on IWLWIFI_LEGACY
12
13config IWLWIFI_LEGACY_DEBUG
14 bool "Enable full debugging output in 4965 and 3945 drivers"
15 depends on IWLWIFI_LEGACY
16 ---help---
17 This option will enable debug tracing output for the iwlwifilegacy
18 drivers.
19
20 This will result in the kernel module being ~100k larger. You can
21 control which debug output is sent to the kernel log by setting the
22 value in
23
24 /sys/class/net/wlan0/device/debug_level
25
26 This entry will only exist if this option is enabled.
27
28 To set a value, simply echo an 8-byte hex value to the same file:
29
30 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
31
32 You can find the list of debug mask values in:
33 drivers/net/wireless/iwlwifilegacy/iwl-debug.h
34
35 If this is your first time using this driver, you should say Y here
36 as the debug information can assist others in helping you resolve
37 any problems you may encounter.
38
39config IWLWIFI_LEGACY_DEBUGFS
40 bool "4965 and 3945 debugfs support"
41 depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
42 ---help---
43 Enable creation of debugfs files for the iwlwifilegacy drivers. This
44 is a low-impact option that allows getting insight into the
45 driver's state at runtime.
46
47config IWLWIFI_LEGACY_DEVICE_TRACING
48 bool "iwlwifilegacy legacy device access tracing"
49 depends on IWLWIFI_LEGACY
50 depends on EVENT_TRACING
51 help
52 Say Y here to trace all commands, including TX frames and IO
53 accesses, sent to the device. If you say yes, iwlwifilegacy will
54 register with the ftrace framework for event tracing and dump
55 all this information to the ringbuffer, you may need to
56 increase the ringbuffer size. See the ftrace documentation
57 for more information.
58
59 When tracing is not enabled, this option still has some
60 (though rather small) overhead.
61
62 If unsure, say Y so we can help you better when problems
63 occur.
64endmenu
65
66config IWL4965
67 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
68 depends on IWLWIFI_LEGACY
69 ---help---
70 This option enables support for
71
72 Select to build the driver supporting the:
73
74 Intel Wireless WiFi Link 4965AGN
75
76 This driver uses the kernel's mac80211 subsystem.
77
78 In order to use this driver, you will need a microcode (uCode)
79 image for it. You can obtain the microcode from:
80
81 <http://intellinuxwireless.org/>.
82
83 The microcode is typically installed in /lib/firmware. You can
84 look in the hotplug script /etc/hotplug/firmware.agent to
85 determine which directory FIRMWARE_DIR is set to when the script
86 runs.
87
88 If you want to compile the driver as a module ( = code which can be
89 inserted in and removed from the running kernel whenever you want),
90 say M here and read <file:Documentation/kbuild/modules.txt>. The
91 module will be called iwl4965.
92
93config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on IWLWIFI_LEGACY
96 ---help---
97 Select to build the driver supporting the:
98
99 Intel PRO/Wireless 3945ABG/BG Network Connection
100
101 This driver uses the kernel's mac80211 subsystem.
102
103 In order to use this driver, you will need a microcode (uCode)
104 image for it. You can obtain the microcode from:
105
106 <http://intellinuxwireless.org/>.
107
108 The microcode is typically installed in /lib/firmware. You can
109 look in the hotplug script /etc/hotplug/firmware.agent to
110 determine which directory FIRMWARE_DIR is set to when the script
111 runs.
112
113 If you want to compile the driver as a module ( = code which can be
114 inserted in and removed from the running kernel whenever you want),
115 say M here and read <file:Documentation/kbuild/modules.txt>. The
116 module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644
index 000000000000..d56aeb38c211
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -0,0 +1,25 @@
1obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
2iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwl-legacy-objs += iwl-scan.o iwl-led.o
5iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
6iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
7
8iwl-legacy-objs += $(iwl-legacy-m)
9
10CFLAGS_iwl-devtrace.o := -I$(src)
11
12# 4965
13obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
15iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
16iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
17iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
18iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
19
20# 3945
21obj-$(CONFIG_IWL3945) += iwl3945.o
22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
23iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
24
25ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
index ef0835b01b6b..cfabb38793ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -60,12 +60,13 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 + 60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400; 61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret; 62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; 63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
64 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; 65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
65 struct iwl39_statistics_rx_non_phy *general, *accum_general; 66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
66 struct iwl39_statistics_rx_non_phy *delta_general, *max_general; 67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
67 68
68 if (!iwl_is_alive(priv)) 69 if (!iwl_legacy_is_alive(priv))
69 return -EAGAIN; 70 return -EAGAIN;
70 71
71 buf = kzalloc(bufsz, GFP_KERNEL); 72 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -335,7 +336,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
335 ssize_t ret; 336 ssize_t ret;
336 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; 337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
337 338
338 if (!iwl_is_alive(priv)) 339 if (!iwl_legacy_is_alive(priv))
339 return -EAGAIN; 340 return -EAGAIN;
340 341
341 buf = kzalloc(bufsz, GFP_KERNEL); 342 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -434,7 +435,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
434 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; 435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
435 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div; 436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
436 437
437 if (!iwl_is_alive(priv)) 438 if (!iwl_legacy_is_alive(priv))
438 return -EAGAIN; 439 return -EAGAIN;
439 440
440 buf = kzalloc(bufsz, GFP_KERNEL); 441 buf = kzalloc(bufsz, GFP_KERNEL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
index 70809c53c215..8fef4b32b447 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30#include "iwl-core.h" 30#include "iwl-core.h"
31#include "iwl-debug.h" 31#include "iwl-debug.h"
32 32
33#ifdef CONFIG_IWLWIFI_DEBUGFS 33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, 34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos); 35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, 36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
index 2c9ed2b502a3..836c9919f82e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -185,4 +185,3 @@ struct iwl3945_tfd {
185 185
186 186
187#endif /* __iwl_3945_fh_h__ */ 187#endif /* __iwl_3945_fh_h__ */
188
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 65b5834da28c..779d3cb86e2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -164,12 +164,11 @@ struct iwl3945_eeprom {
164/* 164/*
165 * Per-channel regulatory data. 165 * Per-channel regulatory data.
166 * 166 *
167 * Each channel that *might* be supported by 3945 or 4965 has a fixed location 167 * Each channel that *might* be supported by 3945 has a fixed location
168 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory 168 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
169 * txpower (MSB). 169 * txpower (MSB).
170 * 170 *
171 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) 171 * Entries immediately below are for 20 MHz channel width.
172 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
173 * 172 *
174 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 173 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
175 */ 174 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
index dc7c3a4167a9..abd923558d48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -56,7 +56,7 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
56 .callback = NULL, 56 .callback = NULL,
57 }; 57 };
58 58
59 return iwl_send_cmd(priv, &cmd); 59 return iwl_legacy_send_cmd(priv, &cmd);
60} 60}
61 61
62const struct iwl_led_ops iwl3945_led_ops = { 62const struct iwl_led_ops iwl3945_led_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
index ce990adc51e7..96716276eb0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 1f3e7e34fbc7..977bd2477c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -89,7 +89,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
89}; 89};
90 90
91#define IWL_RATE_MAX_WINDOW 62 91#define IWL_RATE_MAX_WINDOW 62
92#define IWL_RATE_FLUSH (3*HZ) 92#define IWL_RATE_FLUSH (3*HZ)
93#define IWL_RATE_WIN_FLUSH (HZ/2) 93#define IWL_RATE_WIN_FLUSH (HZ/2)
94#define IWL39_RATE_HIGH_TH 11520 94#define IWL39_RATE_HIGH_TH 11520
95#define IWL_SUCCESS_UP_TH 8960 95#define IWL_SUCCESS_UP_TH 8960
@@ -394,18 +394,18 @@ out:
394 IWL_DEBUG_INFO(priv, "leave\n"); 394 IWL_DEBUG_INFO(priv, "leave\n");
395} 395}
396 396
397static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 397static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
398{ 398{
399 return hw->priv; 399 return hw->priv;
400} 400}
401 401
402/* rate scale requires free function to be implemented */ 402/* rate scale requires free function to be implemented */
403static void rs_free(void *priv) 403static void iwl3945_rs_free(void *priv)
404{ 404{
405 return; 405 return;
406} 406}
407 407
408static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) 408static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
409{ 409{
410 struct iwl3945_rs_sta *rs_sta; 410 struct iwl3945_rs_sta *rs_sta;
411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
@@ -423,7 +423,7 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
423 return rs_sta; 423 return rs_sta;
424} 424}
425 425
426static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, 426static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
427 void *priv_sta) 427 void *priv_sta)
428{ 428{
429 struct iwl3945_rs_sta *rs_sta = priv_sta; 429 struct iwl3945_rs_sta *rs_sta = priv_sta;
@@ -438,12 +438,12 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
438 438
439 439
440/** 440/**
441 * rs_tx_status - Update rate control values based on Tx results 441 * iwl3945_rs_tx_status - Update rate control values based on Tx results
442 * 442 *
443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by 443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
444 * the hardware for each rate. 444 * the hardware for each rate.
445 */ 445 */
446static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, 446static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
447 struct ieee80211_sta *sta, void *priv_sta, 447 struct ieee80211_sta *sta, void *priv_sta,
448 struct sk_buff *skb) 448 struct sk_buff *skb)
449{ 449{
@@ -612,7 +612,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
612} 612}
613 613
614/** 614/**
615 * rs_get_rate - find the rate for the requested packet 615 * iwl3945_rs_get_rate - find the rate for the requested packet
616 * 616 *
617 * Returns the ieee80211_rate structure allocated by the driver. 617 * Returns the ieee80211_rate structure allocated by the driver.
618 * 618 *
@@ -627,7 +627,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
627 * rate table and must reference the driver allocated rate table 627 * rate table and must reference the driver allocated rate table
628 * 628 *
629 */ 629 */
630static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, 630static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
631 void *priv_sta, struct ieee80211_tx_rate_control *txrc) 631 void *priv_sta, struct ieee80211_tx_rate_control *txrc)
632{ 632{
633 struct ieee80211_supported_band *sband = txrc->sband; 633 struct ieee80211_supported_band *sband = txrc->sband;
@@ -644,7 +644,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
644 u32 fail_count; 644 u32 fail_count;
645 s8 scale_action = 0; 645 s8 scale_action = 0;
646 unsigned long flags; 646 unsigned long flags;
647 u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; 647 u16 rate_mask;
648 s8 max_rate_idx = -1; 648 s8 max_rate_idx = -1;
649 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; 649 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
650 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 650 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -899,7 +899,8 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
899 * the station is added. Since mac80211 calls this function before a 899 * the station is added. Since mac80211 calls this function before a
900 * station is added we ignore it. 900 * station is added we ignore it.
901 */ 901 */
902static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, 902static void iwl3945_rs_rate_init_stub(void *priv_r,
903 struct ieee80211_supported_band *sband,
903 struct ieee80211_sta *sta, void *priv_sta) 904 struct ieee80211_sta *sta, void *priv_sta)
904{ 905{
905} 906}
@@ -907,13 +908,13 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
907static struct rate_control_ops rs_ops = { 908static struct rate_control_ops rs_ops = {
908 .module = NULL, 909 .module = NULL,
909 .name = RS_NAME, 910 .name = RS_NAME,
910 .tx_status = rs_tx_status, 911 .tx_status = iwl3945_rs_tx_status,
911 .get_rate = rs_get_rate, 912 .get_rate = iwl3945_rs_get_rate,
912 .rate_init = rs_rate_init_stub, 913 .rate_init = iwl3945_rs_rate_init_stub,
913 .alloc = rs_alloc, 914 .alloc = iwl3945_rs_alloc,
914 .free = rs_free, 915 .free = iwl3945_rs_free,
915 .alloc_sta = rs_alloc_sta, 916 .alloc_sta = iwl3945_rs_alloc_sta,
916 .free_sta = rs_free_sta, 917 .free_sta = iwl3945_rs_free_sta,
917#ifdef CONFIG_MAC80211_DEBUGFS 918#ifdef CONFIG_MAC80211_DEBUGFS
918 .add_sta_debugfs = iwl3945_add_debugfs, 919 .add_sta_debugfs = iwl3945_add_debugfs,
919 .remove_sta_debugfs = iwl3945_remove_debugfs, 920 .remove_sta_debugfs = iwl3945_remove_debugfs,
@@ -991,5 +992,3 @@ void iwl3945_rate_control_unregister(void)
991{ 992{
992 ieee80211_rate_control_unregister(&rs_ops); 993 ieee80211_rate_control_unregister(&rs_ops);
993} 994}
994
995
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index 166e9f742596..d096dc28204d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -51,7 +51,6 @@
51#include "iwl-led.h" 51#include "iwl-led.h"
52#include "iwl-3945-led.h" 52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h" 53#include "iwl-3945-debugfs.h"
54#include "iwl-legacy.h"
55 54
56#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
57 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -172,14 +171,14 @@ void iwl3945_disable_events(struct iwl_priv *priv)
172 return; 171 return;
173 } 172 }
174 173
175 disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32))); 174 disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
176 array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32))); 175 array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
177 176
178 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { 177 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
179 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", 178 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
180 disable_ptr); 179 disable_ptr);
181 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) 180 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
182 iwl_write_targ_mem(priv, 181 iwl_legacy_write_targ_mem(priv,
183 disable_ptr + (i * sizeof(u32)), 182 disable_ptr + (i * sizeof(u32)),
184 evt_disable[i]); 183 evt_disable[i]);
185 184
@@ -202,7 +201,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
202 return -1; 201 return -1;
203} 202}
204 203
205#ifdef CONFIG_IWLWIFI_DEBUG 204#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
206#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x 205#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
207 206
208static const char *iwl3945_get_tx_fail_reason(u32 status) 207static const char *iwl3945_get_tx_fail_reason(u32 status)
@@ -255,7 +254,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
255 break; 254 break;
256 case IEEE80211_BAND_2GHZ: 255 case IEEE80211_BAND_2GHZ:
257 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 256 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
258 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 257 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
259 if (rate == IWL_RATE_11M_INDEX) 258 if (rate == IWL_RATE_11M_INDEX)
260 next_rate = IWL_RATE_5M_INDEX; 259 next_rate = IWL_RATE_5M_INDEX;
261 } 260 }
@@ -285,8 +284,9 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
285 284
286 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM); 285 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
287 286
288 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 287 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
289 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 288 q->read_ptr != index;
289 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
290 290
291 tx_info = &txq->txb[txq->q.read_ptr]; 291 tx_info = &txq->txb[txq->q.read_ptr];
292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); 292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
@@ -294,10 +294,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
294 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 294 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
295 } 295 }
296 296
297 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 297 if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
298 (txq_id != IWL39_CMD_QUEUE_NUM) && 298 (txq_id != IWL39_CMD_QUEUE_NUM) &&
299 priv->mac80211_registered) 299 priv->mac80211_registered)
300 iwl_wake_queue(priv, txq); 300 iwl_legacy_wake_queue(priv, txq);
301} 301}
302 302
303/** 303/**
@@ -317,7 +317,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
317 int rate_idx; 317 int rate_idx;
318 int fail; 318 int fail;
319 319
320 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 320 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
322 "is out of range [0-%d] %d %d\n", txq_id, 322 "is out of range [0-%d] %d %d\n", txq_id,
323 index, txq->q.n_bd, txq->q.write_ptr, 323 index, txq->q.n_bd, txq->q.write_ptr,
@@ -363,12 +363,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
363 * RX handler implementations 363 * RX handler implementations
364 * 364 *
365 *****************************************************************************/ 365 *****************************************************************************/
366#ifdef CONFIG_IWLWIFI_DEBUGFS 366#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
367/*
368 * based on the assumption of all statistics counter are in DWORD
369 * FIXME: This function is for debugging, do not deal with
370 * the case of counters roll-over.
371 */
372static void iwl3945_accumulative_statistics(struct iwl_priv *priv, 367static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
373 __le32 *stats) 368 __le32 *stats)
374{ 369{
@@ -410,10 +405,10 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
410 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 405 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
411 (int)sizeof(struct iwl3945_notif_statistics), 406 (int)sizeof(struct iwl3945_notif_statistics),
412 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 407 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
413#ifdef CONFIG_IWLWIFI_DEBUGFS 408#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
414 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); 409 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
415#endif 410#endif
416 iwl_recover_from_statistics(priv, pkt); 411 iwl_legacy_recover_from_statistics(priv, pkt);
417 412
418 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); 413 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
419} 414}
@@ -425,7 +420,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
425 __le32 *flag = (__le32 *)&pkt->u.raw; 420 __le32 *flag = (__le32 *)&pkt->u.raw;
426 421
427 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { 422 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
428#ifdef CONFIG_IWLWIFI_DEBUGFS 423#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
429 memset(&priv->_3945.accum_statistics, 0, 424 memset(&priv->_3945.accum_statistics, 0,
430 sizeof(struct iwl3945_notif_statistics)); 425 sizeof(struct iwl3945_notif_statistics));
431 memset(&priv->_3945.delta_statistics, 0, 426 memset(&priv->_3945.delta_statistics, 0,
@@ -496,14 +491,14 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
496 } 491 }
497 492
498 if (!iwl3945_mod_params.sw_crypto) 493 if (!iwl3945_mod_params.sw_crypto)
499 iwl_set_decrypted_flag(priv, 494 iwl_legacy_set_decrypted_flag(priv,
500 (struct ieee80211_hdr *)rxb_addr(rxb), 495 (struct ieee80211_hdr *)rxb_addr(rxb),
501 le32_to_cpu(rx_end->status), stats); 496 le32_to_cpu(rx_end->status), stats);
502 497
503 skb_add_rx_frag(skb, 0, rxb->page, 498 skb_add_rx_frag(skb, 0, rxb->page,
504 (void *)rx_hdr->payload - (void *)pkt, len); 499 (void *)rx_hdr->payload - (void *)pkt, len);
505 500
506 iwl_update_stats(priv, false, fc, len); 501 iwl_legacy_update_stats(priv, false, fc, len);
507 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 502 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
508 503
509 ieee80211_rx(priv->hw, skb); 504 ieee80211_rx(priv->hw, skb);
@@ -576,7 +571,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
576 rx_status.signal, rx_status.signal, 571 rx_status.signal, rx_status.signal,
577 rx_status.rate_idx); 572 rx_status.rate_idx);
578 573
579 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); 574 iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
575 header);
580 576
581 if (network_packet) { 577 if (network_packet) {
582 priv->_3945.last_beacon_time = 578 priv->_3945.last_beacon_time =
@@ -744,7 +740,7 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
744 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; 740 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
745 station->sta.rate_n_flags = cpu_to_le16(tx_rate); 741 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
746 station->sta.mode = STA_CONTROL_MODIFY_MSK; 742 station->sta.mode = STA_CONTROL_MODIFY_MSK;
747 iwl_send_add_sta(priv, &station->sta, CMD_ASYNC); 743 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
748 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 744 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
749 745
750 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", 746 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
@@ -759,7 +755,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
759 * to set power to V_AUX, do 755 * to set power to V_AUX, do
760 756
761 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { 757 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
762 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 758 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
763 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 759 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
764 ~APMG_PS_CTRL_MSK_PWR_SRC); 760 ~APMG_PS_CTRL_MSK_PWR_SRC);
765 761
@@ -769,7 +765,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
769 } 765 }
770 */ 766 */
771 767
772 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 768 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
773 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 769 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
774 ~APMG_PS_CTRL_MSK_PWR_SRC); 770 ~APMG_PS_CTRL_MSK_PWR_SRC);
775 771
@@ -779,10 +775,11 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
779 775
780static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 776static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
781{ 777{
782 iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); 778 iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
783 iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); 779 iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
784 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0); 780 rxq->rb_stts_dma);
785 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 781 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
782 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
786 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | 783 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
787 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | 784 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
788 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | 785 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
@@ -793,7 +790,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
793 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); 790 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
794 791
795 /* fake read to flush all prev I/O */ 792 /* fake read to flush all prev I/O */
796 iwl_read_direct32(priv, FH39_RSSR_CTRL); 793 iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
797 794
798 return 0; 795 return 0;
799} 796}
@@ -802,23 +799,23 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
802{ 799{
803 800
804 /* bypass mode */ 801 /* bypass mode */
805 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2); 802 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
806 803
807 /* RA 0 is active */ 804 /* RA 0 is active */
808 iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); 805 iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
809 806
810 /* all 6 fifo are active */ 807 /* all 6 fifo are active */
811 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); 808 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
812 809
813 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); 810 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
814 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); 811 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
815 iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); 812 iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
816 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); 813 iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
817 814
818 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE, 815 iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
819 priv->_3945.shared_phys); 816 priv->_3945.shared_phys);
820 817
821 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG, 818 iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
822 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | 819 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
823 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | 820 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
824 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | 821 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
@@ -844,7 +841,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
844 iwl3945_hw_txq_ctx_free(priv); 841 iwl3945_hw_txq_ctx_free(priv);
845 842
846 /* allocate tx queue structure */ 843 /* allocate tx queue structure */
847 rc = iwl_alloc_txq_mem(priv); 844 rc = iwl_legacy_alloc_txq_mem(priv);
848 if (rc) 845 if (rc)
849 return rc; 846 return rc;
850 847
@@ -857,8 +854,8 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
857 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 854 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
858 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? 855 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
859 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 856 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
860 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 857 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
861 txq_id); 858 slots_num, txq_id);
862 if (rc) { 859 if (rc) {
863 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); 860 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
864 goto error; 861 goto error;
@@ -875,21 +872,23 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
875 872
876/* 873/*
877 * Start up 3945's basic functionality after it has been reset 874 * Start up 3945's basic functionality after it has been reset
878 * (e.g. after platform boot, or shutdown via iwl_apm_stop()) 875 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
879 * NOTE: This does not load uCode nor start the embedded processor 876 * NOTE: This does not load uCode nor start the embedded processor
880 */ 877 */
881static int iwl3945_apm_init(struct iwl_priv *priv) 878static int iwl3945_apm_init(struct iwl_priv *priv)
882{ 879{
883 int ret = iwl_apm_init(priv); 880 int ret = iwl_legacy_apm_init(priv);
884 881
885 /* Clear APMG (NIC's internal power management) interrupts */ 882 /* Clear APMG (NIC's internal power management) interrupts */
886 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); 883 iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
887 iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); 884 iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
888 885
889 /* Reset radio chip */ 886 /* Reset radio chip */
890 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); 887 iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
888 APMG_PS_CTRL_VAL_RESET_REQ);
891 udelay(5); 889 udelay(5);
892 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); 890 iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
891 APMG_PS_CTRL_VAL_RESET_REQ);
893 892
894 return ret; 893 return ret;
895} 894}
@@ -898,30 +897,28 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
898{ 897{
899 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; 898 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
900 unsigned long flags; 899 unsigned long flags;
901 u8 rev_id = 0; 900 u8 rev_id = priv->pci_dev->revision;
902 901
903 spin_lock_irqsave(&priv->lock, flags); 902 spin_lock_irqsave(&priv->lock, flags);
904 903
905 /* Determine HW type */ 904 /* Determine HW type */
906 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
907
908 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 905 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
909 906
910 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 907 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
911 IWL_DEBUG_INFO(priv, "RTP type\n"); 908 IWL_DEBUG_INFO(priv, "RTP type\n");
912 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { 909 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
913 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); 910 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
914 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 911 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
915 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); 912 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
916 } else { 913 } else {
917 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); 914 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
918 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 915 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
919 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); 916 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
920 } 917 }
921 918
922 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { 919 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
923 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); 920 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
924 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 921 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
925 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); 922 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
926 } else 923 } else
927 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); 924 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
@@ -929,24 +926,24 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
929 if ((eeprom->board_revision & 0xF0) == 0xD0) { 926 if ((eeprom->board_revision & 0xF0) == 0xD0) {
930 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", 927 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
931 eeprom->board_revision); 928 eeprom->board_revision);
932 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 929 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
933 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 930 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
934 } else { 931 } else {
935 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", 932 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
936 eeprom->board_revision); 933 eeprom->board_revision);
937 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, 934 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
938 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 935 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
939 } 936 }
940 937
941 if (eeprom->almgor_m_version <= 1) { 938 if (eeprom->almgor_m_version <= 1) {
942 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 939 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
943 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); 940 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
944 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", 941 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
945 eeprom->almgor_m_version); 942 eeprom->almgor_m_version);
946 } else { 943 } else {
947 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", 944 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
948 eeprom->almgor_m_version); 945 eeprom->almgor_m_version);
949 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 946 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
950 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); 947 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
951 } 948 }
952 spin_unlock_irqrestore(&priv->lock, flags); 949 spin_unlock_irqrestore(&priv->lock, flags);
@@ -974,7 +971,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
974 971
975 /* Allocate the RX queue, or reset if it is already allocated */ 972 /* Allocate the RX queue, or reset if it is already allocated */
976 if (!rxq->bd) { 973 if (!rxq->bd) {
977 rc = iwl_rx_queue_alloc(priv); 974 rc = iwl_legacy_rx_queue_alloc(priv);
978 if (rc) { 975 if (rc) {
979 IWL_ERR(priv, "Unable to initialize Rx queue\n"); 976 IWL_ERR(priv, "Unable to initialize Rx queue\n");
980 return -ENOMEM; 977 return -ENOMEM;
@@ -989,10 +986,10 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
989 986
990 /* Look at using this instead: 987 /* Look at using this instead:
991 rxq->need_update = 1; 988 rxq->need_update = 1;
992 iwl_rx_queue_update_write_ptr(priv, rxq); 989 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
993 */ 990 */
994 991
995 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); 992 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
996 993
997 rc = iwl3945_txq_ctx_reset(priv); 994 rc = iwl3945_txq_ctx_reset(priv);
998 if (rc) 995 if (rc)
@@ -1017,12 +1014,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1017 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 1014 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1018 txq_id++) 1015 txq_id++)
1019 if (txq_id == IWL39_CMD_QUEUE_NUM) 1016 if (txq_id == IWL39_CMD_QUEUE_NUM)
1020 iwl_cmd_queue_free(priv); 1017 iwl_legacy_cmd_queue_free(priv);
1021 else 1018 else
1022 iwl_tx_queue_free(priv, txq_id); 1019 iwl_legacy_tx_queue_free(priv, txq_id);
1023 1020
1024 /* free tx queue structure */ 1021 /* free tx queue structure */
1025 iwl_free_txq_mem(priv); 1022 iwl_legacy_txq_mem(priv);
1026} 1023}
1027 1024
1028void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1025void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1030,12 +1027,12 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1030 int txq_id; 1027 int txq_id;
1031 1028
1032 /* stop SCD */ 1029 /* stop SCD */
1033 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1030 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
1034 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0); 1031 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1035 1032
1036 /* reset TFD queues */ 1033 /* reset TFD queues */
1037 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 1034 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
1038 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); 1035 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1039 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, 1036 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1040 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), 1037 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1041 1000); 1038 1000);
@@ -1102,12 +1099,12 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1102#define IWL_TEMPERATURE_LIMIT_TIMER 6 1099#define IWL_TEMPERATURE_LIMIT_TIMER 6
1103 1100
1104/** 1101/**
1105 * is_temp_calib_needed - determines if new calibration is needed 1102 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1106 * 1103 *
1107 * records new temperature in tx_mgr->temperature. 1104 * records new temperature in tx_mgr->temperature.
1108 * replaces tx_mgr->last_temperature *only* if calib needed 1105 * replaces tx_mgr->last_temperature *only* if calib needed
1109 * (assumes caller will actually do the calibration!). */ 1106 * (assumes caller will actually do the calibration!). */
1110static int is_temp_calib_needed(struct iwl_priv *priv) 1107static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1111{ 1108{
1112 int temp_diff; 1109 int temp_diff;
1113 1110
@@ -1338,9 +1335,6 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_in
1338 * based on eeprom channel data) for this channel. */ 1335 * based on eeprom channel data) for this channel. */
1339 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); 1336 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
1340 1337
1341 /* further limit to user's max power preference.
1342 * FIXME: Other spectrum management power limitations do not
1343 * seem to apply?? */
1344 power = min(power, priv->tx_power_user_lmt); 1338 power = min(power, priv->tx_power_user_lmt);
1345 scan_power_info->requested_power = power; 1339 scan_power_info->requested_power = power;
1346 1340
@@ -1394,7 +1388,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1394 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); 1388 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1395 1389
1396 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1390 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1397 ch_info = iwl_get_channel_info(priv, priv->band, chan); 1391 ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
1398 if (!ch_info) { 1392 if (!ch_info) {
1399 IWL_ERR(priv, 1393 IWL_ERR(priv,
1400 "Failed to get channel info for channel %d [%d]\n", 1394 "Failed to get channel info for channel %d [%d]\n",
@@ -1402,7 +1396,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1402 return -EINVAL; 1396 return -EINVAL;
1403 } 1397 }
1404 1398
1405 if (!is_channel_valid(ch_info)) { 1399 if (!iwl_legacy_is_channel_valid(ch_info)) {
1406 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " 1400 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
1407 "non-Tx channel.\n"); 1401 "non-Tx channel.\n");
1408 return 0; 1402 return 0;
@@ -1437,7 +1431,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1437 txpower.power[i].rate); 1431 txpower.power[i].rate);
1438 } 1432 }
1439 1433
1440 return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, 1434 return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1441 sizeof(struct iwl3945_txpowertable_cmd), 1435 sizeof(struct iwl3945_txpowertable_cmd),
1442 &txpower); 1436 &txpower);
1443 1437
@@ -1571,7 +1565,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1571 /* set up new Tx power info for each and every channel, 2.4 and 5.x */ 1565 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1572 for (i = 0; i < priv->channel_count; i++) { 1566 for (i = 0; i < priv->channel_count; i++) {
1573 ch_info = &priv->channel_info[i]; 1567 ch_info = &priv->channel_info[i];
1574 a_band = is_channel_a_band(ch_info); 1568 a_band = iwl_legacy_is_channel_a_band(ch_info);
1575 1569
1576 /* Get this chnlgrp's factory calibration temperature */ 1570 /* Get this chnlgrp's factory calibration temperature */
1577 ref_temp = (s16)eeprom->groups[ch_info->group_index]. 1571 ref_temp = (s16)eeprom->groups[ch_info->group_index].
@@ -1637,7 +1631,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1637 1631
1638 for (i = 0; i < priv->channel_count; i++) { 1632 for (i = 0; i < priv->channel_count; i++) {
1639 ch_info = &priv->channel_info[i]; 1633 ch_info = &priv->channel_info[i];
1640 a_band = is_channel_a_band(ch_info); 1634 a_band = iwl_legacy_is_channel_a_band(ch_info);
1641 1635
1642 /* find minimum power of all user and regulatory constraints 1636 /* find minimum power of all user and regulatory constraints
1643 * (does not consider h/w clipping limitations) */ 1637 * (does not consider h/w clipping limitations) */
@@ -1653,7 +1647,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1653 1647
1654 /* update txpower settings for all channels, 1648 /* update txpower settings for all channels,
1655 * send to NIC if associated. */ 1649 * send to NIC if associated. */
1656 is_temp_calib_needed(priv); 1650 iwl3945_is_temp_calib_needed(priv);
1657 iwl3945_hw_reg_comp_txpower_temp(priv); 1651 iwl3945_hw_reg_comp_txpower_temp(priv);
1658 1652
1659 return 0; 1653 return 0;
@@ -1671,8 +1665,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1671 .flags = CMD_WANT_SKB, 1665 .flags = CMD_WANT_SKB,
1672 .data = &rxon_assoc, 1666 .data = &rxon_assoc,
1673 }; 1667 };
1674 const struct iwl_rxon_cmd *rxon1 = &ctx->staging; 1668 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1675 const struct iwl_rxon_cmd *rxon2 = &ctx->active; 1669 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1676 1670
1677 if ((rxon1->flags == rxon2->flags) && 1671 if ((rxon1->flags == rxon2->flags) &&
1678 (rxon1->filter_flags == rxon2->filter_flags) && 1672 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1688,7 +1682,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1688 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; 1682 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1689 rxon_assoc.reserved = 0; 1683 rxon_assoc.reserved = 0;
1690 1684
1691 rc = iwl_send_cmd_sync(priv, &cmd); 1685 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
1692 if (rc) 1686 if (rc)
1693 return rc; 1687 return rc;
1694 1688
@@ -1698,7 +1692,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1698 rc = -EIO; 1692 rc = -EIO;
1699 } 1693 }
1700 1694
1701 iwl_free_pages(priv, cmd.reply_page); 1695 iwl_legacy_free_pages(priv, cmd.reply_page);
1702 1696
1703 return rc; 1697 return rc;
1704} 1698}
@@ -1722,7 +1716,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1722 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1716 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1723 return -EINVAL; 1717 return -EINVAL;
1724 1718
1725 if (!iwl_is_alive(priv)) 1719 if (!iwl_legacy_is_alive(priv))
1726 return -1; 1720 return -1;
1727 1721
1728 /* always get timestamp with Rx frame */ 1722 /* always get timestamp with Rx frame */
@@ -1733,7 +1727,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1733 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); 1727 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1734 staging_rxon->flags |= iwl3945_get_antenna_flags(priv); 1728 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1735 1729
1736 rc = iwl_check_rxon_cmd(priv, ctx); 1730 rc = iwl_legacy_check_rxon_cmd(priv, ctx);
1737 if (rc) { 1731 if (rc) {
1738 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1732 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1739 return -EINVAL; 1733 return -EINVAL;
@@ -1742,8 +1736,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1742 /* If we don't need to send a full RXON, we can use 1736 /* If we don't need to send a full RXON, we can use
1743 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter 1737 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1744 * and other flags for the current radio configuration. */ 1738 * and other flags for the current radio configuration. */
1745 if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) { 1739 if (!iwl_legacy_full_rxon_required(priv,
1746 rc = iwl_send_rxon_assoc(priv, 1740 &priv->contexts[IWL_RXON_CTX_BSS])) {
1741 rc = iwl_legacy_send_rxon_assoc(priv,
1747 &priv->contexts[IWL_RXON_CTX_BSS]); 1742 &priv->contexts[IWL_RXON_CTX_BSS]);
1748 if (rc) { 1743 if (rc) {
1749 IWL_ERR(priv, "Error setting RXON_ASSOC " 1744 IWL_ERR(priv, "Error setting RXON_ASSOC "
@@ -1760,7 +1755,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1760 * an RXON_ASSOC and the new config wants the associated mask enabled, 1755 * an RXON_ASSOC and the new config wants the associated mask enabled,
1761 * we must clear the associated from the active configuration 1756 * we must clear the associated from the active configuration
1762 * before we apply the new config */ 1757 * before we apply the new config */
1763 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { 1758 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1764 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1759 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1765 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1760 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1766 1761
@@ -1770,7 +1765,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1770 */ 1765 */
1771 active_rxon->reserved4 = 0; 1766 active_rxon->reserved4 = 0;
1772 active_rxon->reserved5 = 0; 1767 active_rxon->reserved5 = 0;
1773 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1768 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1774 sizeof(struct iwl3945_rxon_cmd), 1769 sizeof(struct iwl3945_rxon_cmd),
1775 &priv->contexts[IWL_RXON_CTX_BSS].active); 1770 &priv->contexts[IWL_RXON_CTX_BSS].active);
1776 1771
@@ -1782,9 +1777,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1782 "configuration (%d).\n", rc); 1777 "configuration (%d).\n", rc);
1783 return rc; 1778 return rc;
1784 } 1779 }
1785 iwl_clear_ucode_stations(priv, 1780 iwl_legacy_clear_ucode_stations(priv,
1781 &priv->contexts[IWL_RXON_CTX_BSS]);
1782 iwl_legacy_restore_stations(priv,
1786 &priv->contexts[IWL_RXON_CTX_BSS]); 1783 &priv->contexts[IWL_RXON_CTX_BSS]);
1787 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1788 } 1784 }
1789 1785
1790 IWL_DEBUG_INFO(priv, "Sending RXON\n" 1786 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1802,10 +1798,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1802 staging_rxon->reserved4 = 0; 1798 staging_rxon->reserved4 = 0;
1803 staging_rxon->reserved5 = 0; 1799 staging_rxon->reserved5 = 0;
1804 1800
1805 iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); 1801 iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1806 1802
1807 /* Apply the new configuration */ 1803 /* Apply the new configuration */
1808 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1804 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1809 sizeof(struct iwl3945_rxon_cmd), 1805 sizeof(struct iwl3945_rxon_cmd),
1810 staging_rxon); 1806 staging_rxon);
1811 if (rc) { 1807 if (rc) {
@@ -1816,14 +1812,15 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1816 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 1812 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1817 1813
1818 if (!new_assoc) { 1814 if (!new_assoc) {
1819 iwl_clear_ucode_stations(priv, 1815 iwl_legacy_clear_ucode_stations(priv,
1820 &priv->contexts[IWL_RXON_CTX_BSS]); 1816 &priv->contexts[IWL_RXON_CTX_BSS]);
1821 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]); 1817 iwl_legacy_restore_stations(priv,
1818 &priv->contexts[IWL_RXON_CTX_BSS]);
1822 } 1819 }
1823 1820
1824 /* If we issue a new RXON command which required a tune then we must 1821 /* If we issue a new RXON command which required a tune then we must
1825 * send a new TXPOWER command or we won't be able to Tx any frames */ 1822 * send a new TXPOWER command or we won't be able to Tx any frames */
1826 rc = iwl_set_tx_power(priv, priv->tx_power_next, true); 1823 rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1827 if (rc) { 1824 if (rc) {
1828 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); 1825 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1829 return rc; 1826 return rc;
@@ -1853,7 +1850,7 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1853{ 1850{
1854 /* This will kick in the "brute force" 1851 /* This will kick in the "brute force"
1855 * iwl3945_hw_reg_comp_txpower_temp() below */ 1852 * iwl3945_hw_reg_comp_txpower_temp() below */
1856 if (!is_temp_calib_needed(priv)) 1853 if (!iwl3945_is_temp_calib_needed(priv))
1857 goto reschedule; 1854 goto reschedule;
1858 1855
1859 /* Set up a new set of temp-adjusted TxPowers, send to NIC. 1856 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
@@ -1900,7 +1897,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1900 u8 grp_channel; 1897 u8 grp_channel;
1901 1898
1902 /* Find the group index for the channel ... don't use index 1(?) */ 1899 /* Find the group index for the channel ... don't use index 1(?) */
1903 if (is_channel_a_band(ch_info)) { 1900 if (iwl_legacy_is_channel_a_band(ch_info)) {
1904 for (group = 1; group < 5; group++) { 1901 for (group = 1; group < 5; group++) {
1905 grp_channel = ch_grp[group].group_channel; 1902 grp_channel = ch_grp[group].group_channel;
1906 if (ch_info->channel <= grp_channel) { 1903 if (ch_info->channel <= grp_channel) {
@@ -2080,8 +2077,8 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2080 /* initialize Tx power info for each and every channel, 2.4 and 5.x */ 2077 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2081 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; 2078 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
2082 i++, ch_info++) { 2079 i++, ch_info++) {
2083 a_band = is_channel_a_band(ch_info); 2080 a_band = iwl_legacy_is_channel_a_band(ch_info);
2084 if (!is_channel_valid(ch_info)) 2081 if (!iwl_legacy_is_channel_valid(ch_info))
2085 continue; 2082 continue;
2086 2083
2087 /* find this channel's channel group (*not* "band") index */ 2084 /* find this channel's channel group (*not* "band") index */
@@ -2184,7 +2181,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2184{ 2181{
2185 int rc; 2182 int rc;
2186 2183
2187 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); 2184 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2188 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, 2185 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2189 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 2186 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2190 if (rc < 0) 2187 if (rc < 0)
@@ -2201,10 +2198,10 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2201 2198
2202 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); 2199 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2203 2200
2204 iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); 2201 iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
2205 iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); 2202 iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
2206 2203
2207 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 2204 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
2208 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | 2205 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2209 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | 2206 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2210 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | 2207 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
@@ -2233,7 +2230,8 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2233} 2230}
2234 2231
2235 2232
2236static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 2233static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
2234 u8 *data)
2237{ 2235{
2238 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; 2236 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
2239 addsta->mode = cmd->mode; 2237 addsta->mode = cmd->mode;
@@ -2261,7 +2259,7 @@ static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2261 if (sta_id_r) 2259 if (sta_id_r)
2262 *sta_id_r = IWL_INVALID_STATION; 2260 *sta_id_r = IWL_INVALID_STATION;
2263 2261
2264 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); 2262 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2265 if (ret) { 2263 if (ret) {
2266 IWL_ERR(priv, "Unable to add station %pM\n", addr); 2264 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2267 return ret; 2265 return ret;
@@ -2296,7 +2294,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2296 return 0; 2294 return 0;
2297 } 2295 }
2298 2296
2299 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, 2297 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2300 vif->bss_conf.bssid); 2298 vif->bss_conf.bssid);
2301} 2299}
2302 2300
@@ -2347,7 +2345,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2347 * 1M CCK rates */ 2345 * 1M CCK rates */
2348 2346
2349 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 2347 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2350 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2348 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2351 2349
2352 index = IWL_FIRST_CCK_RATE; 2350 index = IWL_FIRST_CCK_RATE;
2353 for (i = IWL_RATE_6M_INDEX_TABLE; 2351 for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2368,14 +2366,14 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2368 2366
2369 /* Update the rate scaling for control frame Tx */ 2367 /* Update the rate scaling for control frame Tx */
2370 rate_cmd.table_id = 0; 2368 rate_cmd.table_id = 0;
2371 rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2369 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2372 &rate_cmd); 2370 &rate_cmd);
2373 if (rc) 2371 if (rc)
2374 return rc; 2372 return rc;
2375 2373
2376 /* Update the rate scaling for data frame Tx */ 2374 /* Update the rate scaling for data frame Tx */
2377 rate_cmd.table_id = 1; 2375 rate_cmd.table_id = 1;
2378 return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2376 return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2379 &rate_cmd); 2377 &rate_cmd);
2380} 2378}
2381 2379
@@ -2475,11 +2473,11 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
2475 IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); 2473 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2476 2474
2477 /* verify BSM SRAM contents */ 2475 /* verify BSM SRAM contents */
2478 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); 2476 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
2479 for (reg = BSM_SRAM_LOWER_BOUND; 2477 for (reg = BSM_SRAM_LOWER_BOUND;
2480 reg < BSM_SRAM_LOWER_BOUND + len; 2478 reg < BSM_SRAM_LOWER_BOUND + len;
2481 reg += sizeof(u32), image++) { 2479 reg += sizeof(u32), image++) {
2482 val = iwl_read_prph(priv, reg); 2480 val = iwl_legacy_read_prph(priv, reg);
2483 if (val != le32_to_cpu(*image)) { 2481 if (val != le32_to_cpu(*image)) {
2484 IWL_ERR(priv, "BSM uCode verification failed at " 2482 IWL_ERR(priv, "BSM uCode verification failed at "
2485 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", 2483 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -2512,7 +2510,7 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
2512 */ 2510 */
2513static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) 2511static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
2514{ 2512{
2515 _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); 2513 _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2516 return 0; 2514 return 0;
2517} 2515}
2518 2516
@@ -2583,16 +2581,16 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2583 inst_len = priv->ucode_init.len; 2581 inst_len = priv->ucode_init.len;
2584 data_len = priv->ucode_init_data.len; 2582 data_len = priv->ucode_init_data.len;
2585 2583
2586 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 2584 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2587 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 2585 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2588 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); 2586 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2589 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); 2587 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2590 2588
2591 /* Fill BSM memory with bootstrap instructions */ 2589 /* Fill BSM memory with bootstrap instructions */
2592 for (reg_offset = BSM_SRAM_LOWER_BOUND; 2590 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2593 reg_offset < BSM_SRAM_LOWER_BOUND + len; 2591 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2594 reg_offset += sizeof(u32), image++) 2592 reg_offset += sizeof(u32), image++)
2595 _iwl_write_prph(priv, reg_offset, 2593 _iwl_legacy_write_prph(priv, reg_offset,
2596 le32_to_cpu(*image)); 2594 le32_to_cpu(*image));
2597 2595
2598 rc = iwl3945_verify_bsm(priv); 2596 rc = iwl3945_verify_bsm(priv);
@@ -2600,19 +2598,19 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2600 return rc; 2598 return rc;
2601 2599
2602 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ 2600 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2603 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); 2601 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
2604 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, 2602 iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
2605 IWL39_RTC_INST_LOWER_BOUND); 2603 IWL39_RTC_INST_LOWER_BOUND);
2606 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); 2604 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2607 2605
2608 /* Load bootstrap code into instruction SRAM now, 2606 /* Load bootstrap code into instruction SRAM now,
2609 * to prepare to load "initialize" uCode */ 2607 * to prepare to load "initialize" uCode */
2610 iwl_write_prph(priv, BSM_WR_CTRL_REG, 2608 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2611 BSM_WR_CTRL_REG_BIT_START); 2609 BSM_WR_CTRL_REG_BIT_START);
2612 2610
2613 /* Wait for load of bootstrap uCode to finish */ 2611 /* Wait for load of bootstrap uCode to finish */
2614 for (i = 0; i < 100; i++) { 2612 for (i = 0; i < 100; i++) {
2615 done = iwl_read_prph(priv, BSM_WR_CTRL_REG); 2613 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
2616 if (!(done & BSM_WR_CTRL_REG_BIT_START)) 2614 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2617 break; 2615 break;
2618 udelay(10); 2616 udelay(10);
@@ -2626,7 +2624,7 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2626 2624
2627 /* Enable future boot loads whenever power management unit triggers it 2625 /* Enable future boot loads whenever power management unit triggers it
2628 * (e.g. when powering back up after power-save shutdown) */ 2626 * (e.g. when powering back up after power-save shutdown) */
2629 iwl_write_prph(priv, BSM_WR_CTRL_REG, 2627 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2630 BSM_WR_CTRL_REG_BIT_START_EN); 2628 BSM_WR_CTRL_REG_BIT_START_EN);
2631 2629
2632 return 0; 2630 return 0;
@@ -2635,7 +2633,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2635static struct iwl_hcmd_ops iwl3945_hcmd = { 2633static struct iwl_hcmd_ops iwl3945_hcmd = {
2636 .rxon_assoc = iwl3945_send_rxon_assoc, 2634 .rxon_assoc = iwl3945_send_rxon_assoc,
2637 .commit_rxon = iwl3945_commit_rxon, 2635 .commit_rxon = iwl3945_commit_rxon,
2638 .send_bt_config = iwl_send_bt_config,
2639}; 2636};
2640 2637
2641static struct iwl_lib_ops iwl3945_lib = { 2638static struct iwl_lib_ops iwl3945_lib = {
@@ -2661,13 +2658,9 @@ static struct iwl_lib_ops iwl3945_lib = {
2661 }, 2658 },
2662 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, 2659 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
2663 .release_semaphore = iwl3945_eeprom_release_semaphore, 2660 .release_semaphore = iwl3945_eeprom_release_semaphore,
2664 .query_addr = iwlcore_eeprom_query_addr,
2665 }, 2661 },
2666 .send_tx_power = iwl3945_send_tx_power, 2662 .send_tx_power = iwl3945_send_tx_power,
2667 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, 2663 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
2668 .isr_ops = {
2669 .isr = iwl_isr_legacy,
2670 },
2671 2664
2672 .debugfs_ops = { 2665 .debugfs_ops = {
2673 .rx_stats_read = iwl3945_ucode_rx_stats_read, 2666 .rx_stats_read = iwl3945_ucode_rx_stats_read,
@@ -2685,7 +2678,6 @@ static const struct iwl_legacy_ops iwl3945_legacy_ops = {
2685static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2678static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2686 .get_hcmd_size = iwl3945_get_hcmd_size, 2679 .get_hcmd_size = iwl3945_get_hcmd_size,
2687 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2680 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2688 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2689 .request_scan = iwl3945_request_scan, 2681 .request_scan = iwl3945_request_scan,
2690 .post_scan = iwl3945_post_scan, 2682 .post_scan = iwl3945_post_scan,
2691}; 2683};
@@ -2705,13 +2697,10 @@ static struct iwl_base_params iwl3945_base_params = {
2705 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, 2697 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2706 .set_l0s = false, 2698 .set_l0s = false,
2707 .use_bsm = true, 2699 .use_bsm = true,
2708 .use_isr_legacy = true,
2709 .led_compensation = 64, 2700 .led_compensation = 64,
2710 .broken_powersave = true,
2711 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 2701 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2712 .wd_timeout = IWL_DEF_WD_TIMEOUT, 2702 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2713 .max_event_log_size = 512, 2703 .max_event_log_size = 512,
2714 .tx_power_by_driver = true,
2715}; 2704};
2716 2705
2717static struct iwl_cfg iwl3945_bg_cfg = { 2706static struct iwl_cfg iwl3945_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
index 3eef1eb74a78..b118b59b71de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ struct iwl3945_rs_sta {
108 108
109/* 109/*
110 * The common struct MUST be first because it is shared between 110 * The common struct MUST be first because it is shared between
111 * 3945 and agn! 111 * 3945 and 4965!
112 */ 112 */
113struct iwl3945_sta_priv { 113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common; 114 struct iwl_station_priv_common common;
@@ -201,7 +201,7 @@ struct iwl3945_ibss_seq {
201 201
202/****************************************************************************** 202/******************************************************************************
203 * 203 *
204 * Functions implemented in iwl-base.c which are forward declared here 204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c 205 * for use by iwl-*.c
206 * 206 *
207 *****************************************************************************/ 207 *****************************************************************************/
@@ -209,7 +209,7 @@ extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data); 209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr,int left); 212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display); 214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
@@ -217,7 +217,7 @@ extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
217/****************************************************************************** 217/******************************************************************************
218 * 218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here 219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl-base.c 220 * for use by iwl3945-base.c
221 * 221 *
222 * NOTE: The implementation of these functions are hardware specific 222 * NOTE: The implementation of these functions are hardware specific
223 * which is why they are in the hardware specific files (vs. iwl-base.c) 223 * which is why they are in the hardware specific files (vs. iwl-base.c)
@@ -283,7 +283,7 @@ extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
283extern struct ieee80211_ops iwl3945_hw_ops; 283extern struct ieee80211_ops iwl3945_hw_ops;
284 284
285/* 285/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */ 287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); 288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); 289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 000000000000..81d6a25eb04f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
@@ -0,0 +1,967 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-4965-calib.h"
69
70/*****************************************************************************
71 * INIT calibrations framework
72 *****************************************************************************/
73
74struct statistics_general_data {
75 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c;
78 u32 beacon_energy_a;
79 u32 beacon_energy_b;
80 u32 beacon_energy_c;
81};
82
83void iwl4965_calib_free_results(struct iwl_priv *priv)
84{
85 int i;
86
87 for (i = 0; i < IWL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0;
91 }
92}
93
94/*****************************************************************************
95 * RUNTIME calibrations framework
96 *****************************************************************************/
97
98/* "false alarms" are signals that our DSP tries to lock onto,
99 * but then determines that they are either noise, or transmissions
100 * from a distant wireless network (also "noise", really) that get
101 * "stepped on" by stronger transmissions within our own network.
102 * This algorithm attempts to set a sensitivity level that is high
103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
107 u32 norm_fa,
108 u32 rx_enable_time,
109 struct statistics_general_data *rx_info)
110{
111 u32 max_nrg_cck = 0;
112 int i = 0;
113 u8 max_silence_rssi = 0;
114 u32 silence_ref = 0;
115 u8 silence_rssi_a = 0;
116 u8 silence_rssi_b = 0;
117 u8 silence_rssi_c = 0;
118 u32 val;
119
120 /* "false_alarms" values below are cross-multiplications to assess the
121 * numbers of false alarms within the measured period of actual Rx
122 * (Rx is off when we're txing), vs the min/max expected false alarms
123 * (some should be expected if rx is sensitive enough) in a
124 * hypothetical listening period of 200 time units (TU), 204.8 msec:
125 *
126 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
127 *
128 * */
129 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
134
135 data = &(priv->sensitivity_data);
136
137 data->nrg_auto_corr_silence_diff = 0;
138
139 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
143 ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
145 ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
147 ALL_BAND_FILTER) >> 8);
148
149 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val);
151
152 /* Store silence rssi in 20-beacon history table */
153 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
154 data->nrg_silence_idx++;
155 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
156 data->nrg_silence_idx = 0;
157
158 /* Find max silence rssi across 20 beacon history */
159 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
160 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val);
162 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
164 silence_rssi_a, silence_rssi_b, silence_rssi_c,
165 silence_ref);
166
167 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame.
169 * Save it in 10-beacon history table. */
170 i = data->nrg_energy_idx;
171 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
172 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
173
174 data->nrg_energy_idx++;
175 if (data->nrg_energy_idx >= 10)
176 data->nrg_energy_idx = 0;
177
178 /* Find min rx energy (max value) across 10 beacon history.
179 * This is the minimum signal level that we want to receive well.
180 * Add backoff (margin so we don't miss slightly lower energy frames).
181 * This establishes an upper bound (min value) for energy threshold. */
182 max_nrg_cck = data->nrg_value[0];
183 for (i = 1; i < 10; i++)
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6;
186
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6);
190
191 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */
193 if (false_alarms < min_false_alarms)
194 data->num_in_cck_no_fa++;
195 else
196 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa);
199
200 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
204 false_alarms, max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref;
209
210 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW;
216
217 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
220 (s32)silence_ref;
221
222 IWL_DEBUG_CALIB(priv,
223 "norm FA %u < min FA %u, silence diff %d\n",
224 false_alarms, min_false_alarms,
225 data->nrg_auto_corr_silence_diff);
226
227 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms
229 * 1b) AND there's a significant difference in Rx levels
230 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
236
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
241 } else {
242 IWL_DEBUG_CALIB(priv,
243 "... but not changing sensitivity\n");
244 }
245
246 /* Else we got a healthy number of false alarms, keep status quo */
247 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
250
251 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref;
253
254 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN;
261 else
262 data->nrg_th_cck = max_nrg_cck;
263 }
264 }
265
266 /* Make sure the energy threshold does not go above the measured
267 * energy of the desired Rx signals (reduced by backoff margin),
268 * or else we might start missing Rx frames.
269 * Lower value is higher energy, so we use max()!
270 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
273
274 data->nrg_prev_state = data->nrg_curr_state;
275
276 /* Auto-correlation CCK algorithm */
277 if (false_alarms > min_false_alarms) {
278
279 /* increase auto_corr values to decrease sensitivity
280 * so the DSP won't be disturbed by the noise
281 */
282 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
283 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
284 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val);
288 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
295
296 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck =
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val);
303 }
304
305 return 0;
306}
307
308
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
310 u32 norm_fa,
311 u32 rx_enable_time)
312{
313 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
319
320 data = &(priv->sensitivity_data);
321
322 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) {
324
325 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
326 false_alarms, max_false_alarms);
327
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val);
331
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
335
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val);
339
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
343 }
344
345 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) {
347
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
349 false_alarms, min_false_alarms);
350
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val);
354
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
358
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val);
362
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms);
369 }
370 return 0;
371}
372
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
374 struct iwl_sensitivity_data *data,
375 __le16 *tbl)
376{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
385
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
387 cpu_to_le16((u16)data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc);
390
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
392 cpu_to_le16((u16)data->nrg_th_cck);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
394 cpu_to_le16((u16)data->nrg_th_ofdm);
395
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
397 cpu_to_le16(data->barker_corr_th_min);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
399 cpu_to_le16(data->barker_corr_th_min_mrc);
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
401 cpu_to_le16(data->nrg_th_cca);
402
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
406 data->nrg_th_ofdm);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411}
412
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv)
415{
416 struct iwl_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD,
420 .len = sizeof(struct iwl_sensitivity_cmd),
421 .flags = CMD_ASYNC,
422 .data = &cmd,
423 };
424
425 data = &(priv->sensitivity_data);
426
427 memset(&cmd, 0, sizeof(cmd));
428
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
430
431 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
433
434 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
436 sizeof(u16)*HD_TABLE_SIZE)) {
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
438 return 0;
439 }
440
441 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE);
444
445 return iwl_legacy_send_cmd(priv, &cmd_out);
446}
447
448void iwl4965_init_sensitivity(struct iwl_priv *priv)
449{
450 int ret = 0;
451 int i;
452 struct iwl_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
454
455 if (priv->disable_sens_cal)
456 return;
457
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
459
460 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data);
462
463 if (ranges == NULL)
464 return;
465
466 memset(data, 0, sizeof(struct iwl_sensitivity_data));
467
468 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0;
474
475 for (i = 0; i < 10; i++)
476 data->nrg_value[i] = 0;
477
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0;
480
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
487 data->nrg_th_cck = ranges->nrg_th_cck;
488 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
489 data->barker_corr_th_min = ranges->barker_corr_th_min;
490 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
491 data->nrg_th_cca = ranges->nrg_th_cca;
492
493 data->last_bad_plcp_cnt_ofdm = 0;
494 data->last_fa_cnt_ofdm = 0;
495 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0;
497
498 ret |= iwl4965_sensitivity_write(priv);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
500}
501
/*
 * iwl4965_sensitivity_calibration - run-time sensitivity algorithm
 *
 * Processes one uCode statistics notification (@resp points at a
 * struct iwl_notif_statistics): extracts CCK/OFDM false-alarm and
 * bad-PLCP counters plus beacon silence/energy measurements, converts
 * the cumulative counters into per-interval deltas, then runs the OFDM
 * auto-correlation and CCK energy algorithms and writes any resulting
 * threshold changes to the uCode.
 */
void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
{
	u32 rx_enable_time;
	u32 fa_cck;
	u32 fa_ofdm;
	u32 bad_plcp_cck;
	u32 bad_plcp_ofdm;
	u32 norm_fa_ofdm;
	u32 norm_fa_cck;
	struct iwl_sensitivity_data *data = NULL;
	struct statistics_rx_non_phy *rx_info;
	struct statistics_rx_phy *ofdm, *cck;
	unsigned long flags;
	struct statistics_general_data statis;

	if (priv->disable_sens_cal)
		return;

	data = &(priv->sensitivity_data);

	/* Sensitivity tuning only makes sense while associated */
	if (!iwl_legacy_is_any_associated(priv)) {
		IWL_DEBUG_CALIB(priv, "<< - not associated\n");
		return;
	}

	/* Hold priv->lock while reading out of the notification buffer */
	spin_lock_irqsave(&priv->lock, flags);

	rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
	ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
	cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);

	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	/* Extract Statistics: */
	rx_enable_time = le32_to_cpu(rx_info->channel_load);
	fa_cck = le32_to_cpu(cck->false_alarm_cnt);
	fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
	bad_plcp_cck = le32_to_cpu(cck->plcp_err);
	bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);

	/* Snapshot beacon silence/energy for the CCK energy algorithm */
	statis.beacon_silence_rssi_a =
			le32_to_cpu(rx_info->beacon_silence_rssi_a);
	statis.beacon_silence_rssi_b =
			le32_to_cpu(rx_info->beacon_silence_rssi_b);
	statis.beacon_silence_rssi_c =
			le32_to_cpu(rx_info->beacon_silence_rssi_c);
	statis.beacon_energy_a =
			le32_to_cpu(rx_info->beacon_energy_a);
	statis.beacon_energy_b =
			le32_to_cpu(rx_info->beacon_energy_b);
	statis.beacon_energy_c =
			le32_to_cpu(rx_info->beacon_energy_c);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);

	/* No receive time means there is nothing to normalize against */
	if (!rx_enable_time) {
		IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
		return;
	}

	/* These statistics increase monotonically, and do not reset
	 * at each beacon. Calculate difference from last value, or just
	 * use the new statistics value if it has reset or wrapped around. */
	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
	else {
		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
	}

	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
	else {
		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
	}

	if (data->last_fa_cnt_ofdm > fa_ofdm)
		data->last_fa_cnt_ofdm = fa_ofdm;
	else {
		fa_ofdm -= data->last_fa_cnt_ofdm;
		data->last_fa_cnt_ofdm += fa_ofdm;
	}

	if (data->last_fa_cnt_cck > fa_cck)
		data->last_fa_cnt_cck = fa_cck;
	else {
		fa_cck -= data->last_fa_cnt_cck;
		data->last_fa_cnt_cck += fa_cck;
	}

	/* Total aborted signal locks */
	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
	norm_fa_cck = fa_cck + bad_plcp_cck;

	IWL_DEBUG_CALIB(priv,
			"cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
			bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);

	/* Run both algorithms, then push any changed thresholds */
	iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
	iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);

	iwl4965_sensitivity_write(priv);
}
612
613static inline u8 iwl4965_find_first_chain(u8 mask)
614{
615 if (mask & ANT_A)
616 return CHAIN_A;
617 if (mask & ANT_B)
618 return CHAIN_B;
619 return CHAIN_C;
620}
621
622/**
623 * Run disconnected antenna algorithm to find out which antennas are
624 * disconnected.
625 */
626static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
628 struct iwl_chain_noise_data *data)
629{
630 u32 active_chains = 0;
631 u32 max_average_sig;
632 u16 max_average_sig_antenna_i;
633 u8 num_tx_chains;
634 u8 first_chain;
635 u16 i = 0;
636
637 average_sig[0] = data->chain_signal_a /
638 priv->cfg->base_params->chain_noise_num_beacons;
639 average_sig[1] = data->chain_signal_b /
640 priv->cfg->base_params->chain_noise_num_beacons;
641 average_sig[2] = data->chain_signal_c /
642 priv->cfg->base_params->chain_noise_num_beacons;
643
644 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0];
646 max_average_sig_antenna_i = 0;
647 active_chains = (1 << max_average_sig_antenna_i);
648 } else {
649 max_average_sig = average_sig[1];
650 max_average_sig_antenna_i = 1;
651 active_chains = (1 << max_average_sig_antenna_i);
652 }
653
654 if (average_sig[2] >= max_average_sig) {
655 max_average_sig = average_sig[2];
656 max_average_sig_antenna_i = 2;
657 active_chains = (1 << max_average_sig_antenna_i);
658 }
659
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
661 average_sig[0], average_sig[1], average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
663 max_average_sig, max_average_sig_antenna_i);
664
665 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) {
667 if (i != max_average_sig_antenna_i) {
668 s32 rssi_delta = (max_average_sig - average_sig[i]);
669
670 /* If signal is very weak, compared with
671 * strongest, mark it as disconnected. */
672 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
673 data->disconn_array[i] = 1;
674 else
675 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n",
678 i, rssi_delta, data->disconn_array[i]);
679 }
680 }
681
682 /*
683 * The above algorithm sometimes fails when the ucode
684 * reports 0 for all chains. It's not clear why that
685 * happens to start with, but it is then causing trouble
686 * because this can make us enable more chains than the
687 * hardware really has.
688 *
689 * To be safe, simply mask out any chains that we know
690 * are not on the device.
691 */
692 active_chains &= priv->hw_params.valid_rx_ant;
693
694 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) {
696 /* loops on all the bits of
697 * priv->hw_setting.valid_tx_ant */
698 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk))
700 continue;
701
702 num_tx_chains++;
703 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */
705 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num &&
707 data->disconn_array[i]) {
708 /*
709 * If all chains are disconnected
710 * connect the first valid tx chain
711 */
712 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain);
716 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
717 W/A - declare %d as connected\n",
718 first_chain);
719 break;
720 }
721 }
722
723 if (active_chains != priv->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv,
726 "Detected that not all antennas are connected! "
727 "Connected: %#x, valid: %#x.\n",
728 active_chains, priv->hw_params.valid_rx_ant);
729
730 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
733 active_chains);
734}
735
/*
 * iwl4965_gain_computation - balance RX gain across the three chains
 *
 * Computes a differential gain code for every connected chain relative
 * to the quietest one (@min_average_noise_antenna_i, whose code is
 * forced to 0), then sends the codes to the uCode exactly once via
 * REPLY_PHY_CALIBRATION_CMD and marks the chain-noise calibration
 * finished.
 *
 * @average_noise:  per-chain averaged silence noise, indexed by chain
 * @default_chain:  first chain index the loop starts from
 */
static void iwl4965_gain_computation(struct iwl_priv *priv,
		u32 *average_noise,
		u16 min_average_noise_antenna_i,
		u32 min_average_noise,
		u8 default_chain)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	/* The quietest chain is the reference: zero differential gain */
	data->delta_gain_code[min_average_noise_antenna_i] = 0;

	for (i = default_chain; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		/* Only compute a code for connected chains whose code is
		 * still at its initialization value; everything else is
		 * reset to 0. */
		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			/* Scale the noise excess into a gain code;
			 * the 10/15 factor converts the noise delta to
			 * code units (presumably 1.5 dB per code step —
			 * TODO confirm against uCode documentation) */
			delta_g = average_noise[i] - min_average_noise;
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			/* NOTE(review): bit 2 appears to flag the code as
			 * an attenuation/valid entry for the uCode —
			 * confirm against the PHY calibration command
			 * definition */
			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
		     data->delta_gain_code[0],
		     data->delta_gain_code[1],
		     data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl_calib_diff_gain_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				      sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB(priv, "fail sending cmd "
				     "REPLY_PHY_CALIBRATION_CMD\n");

		/* TODO we might want recalculate
		 * rx_chain in rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
}
793
794
795
796/*
797 * Accumulate 16 beacons of signal and noise statistics for each of
798 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers.
801 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
803{
804 struct iwl_chain_noise_data *data = NULL;
805
806 u32 chain_noise_a;
807 u32 chain_noise_b;
808 u32 chain_noise_c;
809 u32 chain_sig_a;
810 u32 chain_sig_b;
811 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0;
817 u16 rxon_chnum = INITIALIZATION_VALUE;
818 u16 stat_chnum = INITIALIZATION_VALUE;
819 u8 rxon_band24;
820 u8 stat_band24;
821 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info;
823
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
825
826 if (priv->disable_chain_noise_cal)
827 return;
828
829 data = &(priv->chain_noise_data);
830
831 /*
832 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever.
834 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
838 return;
839 }
840
841 spin_lock_irqsave(&priv->lock, flags);
842
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
844 rx.general);
845
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags);
849 return;
850 }
851
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854
855 stat_band24 = !!(((struct iwl_notif_statistics *)
856 stat_resp)->flag &
857 STATISTICS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
859 stat_resp)->flag) >> 16;
860
861 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
865 rxon_chnum, rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags);
867 return;
868 }
869
870 /*
871 * Accumulate beacon statistics values across
872 * "chain_noise_num_beacons"
873 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
875 IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
877 IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
879 IN_BAND_FILTER;
880
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884
885 spin_unlock_irqrestore(&priv->lock, flags);
886
887 data->beacon_count++;
888
889 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
890 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
891 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
892
893 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
898 rxon_chnum, rxon_band24, data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
900 chain_sig_a, chain_sig_b, chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
902 chain_noise_a, chain_noise_b, chain_noise_c);
903
904 /* If this is the "chain_noise_num_beacons", determine:
905 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count !=
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return;
910
911 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data);
913
914 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a /
916 priv->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b /
918 priv->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c /
920 priv->cfg->base_params->chain_noise_num_beacons;
921
922 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) &&
924 (average_noise[i] <= min_average_noise)) {
925 /* This means that chain i is active and has
926 * lower noise values so far: */
927 min_average_noise = average_noise[i];
928 min_average_noise_antenna_i = i;
929 }
930 }
931
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
933 average_noise[0], average_noise[1],
934 average_noise[2]);
935
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
937 min_average_noise, min_average_noise_antenna_i);
938
939 iwl4965_gain_computation(priv, average_noise,
940 min_average_noise_antenna_i, min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
942
943 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON
945 */
946 if (priv->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv);
948
949 data->state = IWL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false);
951}
952
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
954{
955 int i;
956 memset(&(priv->sensitivity_data), 0,
957 sizeof(struct iwl_sensitivity_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963
964 /* Ask for statistics now, the uCode will send notification
965 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
967}
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
index 9f7b2f935964..f46c80e6e005 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -59,21 +59,17 @@
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/ 61 *****************************************************************************/
62#ifndef __iwl_4965_calib_h__
63#define __iwl_4965_calib_h__
62 64
63#ifndef __iwl_legacy_h__ 65#include "iwl-dev.h"
64#define __iwl_legacy_h__ 66#include "iwl-core.h"
67#include "iwl-commands.h"
65 68
66/* mac80211 handlers */ 69void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
67int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed); 70void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
68void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw); 71void iwl4965_init_sensitivity(struct iwl_priv *priv);
69void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, 72void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
70 struct ieee80211_vif *vif, 73void iwl4965_calib_free_results(struct iwl_priv *priv);
71 struct ieee80211_bss_conf *bss_conf,
72 u32 changes);
73void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
74 struct ieee80211_tx_info *info,
75 __le16 fc, __le32 *tx_flags);
76 74
77irqreturn_t iwl_isr_legacy(int irq, void *data); 75#endif /* __iwl_4965_calib_h__ */
78
79#endif /* __iwl_legacy_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644
index 000000000000..1c93665766e4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
@@ -0,0 +1,774 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
/*
 * Shared debugfs output formats: a single "label value" line
 * (fmt_value), a four-column "current cumulative delta max" table row
 * (fmt_table), and the table header line (fmt_header).
 * NOTE(review): inter-column spacing here reflects the extracted text;
 * the original file likely used wider field padding — confirm against
 * the upstream iwlegacy sources before relying on exact column layout.
 */
static const char *fmt_value = " %-30s %10u\n";
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
static const char *fmt_header =
	"%-32s current cumulative delta max\n";
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Can not allocate Buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
83 * the statistic information display here is based on
84 * the last statistics notification from uCode
85 * might not reflect the current uCode activity
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504 IWL_ERR(priv, "Can not allocate Buffer\n");
505 return -ENOMEM;
506 }
507
508 /* the statistic information display here is based on
509 * the last statistics notification from uCode
510 * might not reflect the current uCode activity
511 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682 IWL_ERR(priv, "Can not allocate Buffer\n");
683 return -ENOMEM;
684 }
685
686 /* the statistic information display here is based on
687 * the last statistics notification from uCode
688 * might not reflect the current uCode activity
689 */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644
index 000000000000..6c8e35361a9e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
@@ -0,0 +1,59 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
/*
 * Debugfs hooks for the 4965 uCode statistics files.
 *
 * When CONFIG_IWLWIFI_LEGACY_DEBUGFS is enabled, the real read handlers
 * implemented in iwl-4965-debugfs.c are declared; otherwise inline
 * stubs that return 0 (immediate EOF) keep the callers building without
 * any #ifdefs at the call sites.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos);
ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos);
ssize_t iwl4965_ucode_general_stats_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos);
#else
/* Debugfs compiled out: every statistics read reports an empty file. */
static ssize_t
iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	return 0;
}
static ssize_t
iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	return 0;
}
static ssize_t
iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	return 0;
}
#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644
index 000000000000..cb9baab1ff7d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
@@ -0,0 +1,154 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
90int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92 u16 count;
93 int ret;
94
95 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96 /* Request semaphore */
97 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100 /* See if we got it */
101 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104 EEPROM_SEM_TIMEOUT);
105 if (ret >= 0) {
106 IWL_DEBUG_IO(priv,
107 "Acquired semaphore after %d tries.\n",
108 count+1);
109 return ret;
110 }
111 }
112
113 return ret;
114}
115
116void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
122
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125 u16 eeprom_ver;
126 u16 calib_ver;
127
128 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = iwl_legacy_eeprom_query16(priv,
130 EEPROM_4965_CALIB_VERSION_OFFSET);
131
132 if (eeprom_ver < priv->cfg->eeprom_ver ||
133 calib_ver < priv->cfg->eeprom_calib_ver)
134 goto err;
135
136 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137 eeprom_ver, calib_ver);
138
139 return 0;
140err:
141 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142 "CALIB=0x%x < 0x%x\n",
143 eeprom_ver, priv->cfg->eeprom_ver,
144 calib_ver, priv->cfg->eeprom_calib_ver);
145 return -EINVAL;
146
147}
148
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151 const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152 EEPROM_MAC_ADDRESS);
153 memcpy(mac, addr, ETH_ALEN);
154}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 9166794eda0d..08b189c8472d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -789,4 +789,26 @@ struct iwl4965_scd_bc_tbl {
789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; 789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __packed; 790} __packed;
791 791
792
793#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
794
795/* RSSI to dBm */
796#define IWL4965_RSSI_OFFSET 44
797
798/* PCI registers */
799#define PCI_CFG_RETRY_TIMEOUT 0x041
800
801/* PCI register values */
802#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
803#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
804
805#define IWL4965_DEFAULT_TX_RETRY 15
806
807/* Limit range of txpower output target to be between these values */
808#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
809
810/* EEPROM */
811#define IWL4965_FIRST_AMPDU_QUEUE 10
812
813
792#endif /* !__iwl_4965_hw_h__ */ 814#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644
index 000000000000..26d324e30692
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -0,0 +1,74 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-4965-led.h"
45
46/* Send led command */
47static int
48iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57 u32 reg;
58
59 reg = iwl_read32(priv, CSR_LED_REG);
60 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
61 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
62
63 return iwl_legacy_send_cmd(priv, &cmd);
64}
65
66/* Set led register off */
67void iwl4965_led_enable(struct iwl_priv *priv)
68{
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70}
71
72const struct iwl_led_ops iwl4965_led_ops = {
73 .cmd = iwl4965_send_led_cmd,
74};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/fw.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
index 8e350eea3422..5ed3615fc338 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/fw.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -19,12 +19,15 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 * 24 *
28 *****************************************************************************/ 25 *****************************************************************************/
29 26
30#include "../rtl8192ce/fw.c" 27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
30extern const struct iwl_led_ops iwl4965_led_ops;
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644
index 000000000000..5a8a3cce27bc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -0,0 +1,1260 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
53/*
54 * EEPROM
55 */
56struct iwl_mod_params iwl4965_mod_params = {
57 .amsdu_size_8K = 1,
58 .restart_fw = 1,
59 /* the rest are 0 by default */
60};
61
62void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
63{
64 unsigned long flags;
65 int i;
66 spin_lock_irqsave(&rxq->lock, flags);
67 INIT_LIST_HEAD(&rxq->rx_free);
68 INIT_LIST_HEAD(&rxq->rx_used);
69 /* Fill the rx_used queue with _all_ of the Rx buffers */
70 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
71 /* In the reset function, these buffers may have been allocated
72 * to an SKB, so we need to unmap and free potential storage */
73 if (rxq->pool[i].page != NULL) {
74 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
75 PAGE_SIZE << priv->hw_params.rx_page_order,
76 PCI_DMA_FROMDEVICE);
77 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
78 rxq->pool[i].page = NULL;
79 }
80 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
81 }
82
83 for (i = 0; i < RX_QUEUE_SIZE; i++)
84 rxq->queue[i] = NULL;
85
86 /* Set us so that we have processed and used all buffers, but have
87 * not restocked the Rx queue with fresh buffers */
88 rxq->read = rxq->write = 0;
89 rxq->write_actual = 0;
90 rxq->free_count = 0;
91 spin_unlock_irqrestore(&rxq->lock, flags);
92}
93
94int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
95{
96 u32 rb_size;
97 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
98 u32 rb_timeout = 0;
99
100 if (priv->cfg->mod_params->amsdu_size_8K)
101 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
102 else
103 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
104
105 /* Stop Rx DMA */
106 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
107
108 /* Reset driver's Rx queue write index */
109 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
110
111 /* Tell device where to find RBD circular buffer in DRAM */
112 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
113 (u32)(rxq->bd_dma >> 8));
114
115 /* Tell device where in DRAM to update its Rx status */
116 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
117 rxq->rb_stts_dma >> 4);
118
119 /* Enable Rx DMA
120 * Direct rx interrupts to hosts
121 * Rx buffer size 4 or 8k
122 * RB timeout 0x10
123 * 256 RBDs
124 */
125 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
126 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
127 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
128 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
129 rb_size|
130 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
131 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
132
133 /* Set interrupt coalescing timer to default (2048 usecs) */
134 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
135
136 return 0;
137}
138
139static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
140{
141/*
142 * (for documentation purposes)
143 * to set power to V_AUX, do:
144
145 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
146 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
147 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
148 ~APMG_PS_CTRL_MSK_PWR_SRC);
149 */
150
151 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
152 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
153 ~APMG_PS_CTRL_MSK_PWR_SRC);
154}
155
156int iwl4965_hw_nic_init(struct iwl_priv *priv)
157{
158 unsigned long flags;
159 struct iwl_rx_queue *rxq = &priv->rxq;
160 int ret;
161
162 /* nic_init */
163 spin_lock_irqsave(&priv->lock, flags);
164 priv->cfg->ops->lib->apm_ops.init(priv);
165
166 /* Set interrupt coalescing calibration timer to default (512 usecs) */
167 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 iwl4965_set_pwr_vmain(priv);
172
173 priv->cfg->ops->lib->apm_ops.config(priv);
174
175 /* Allocate the RX queue, or reset if it is already allocated */
176 if (!rxq->bd) {
177 ret = iwl_legacy_rx_queue_alloc(priv);
178 if (ret) {
179 IWL_ERR(priv, "Unable to initialize Rx queue\n");
180 return -ENOMEM;
181 }
182 } else
183 iwl4965_rx_queue_reset(priv, rxq);
184
185 iwl4965_rx_replenish(priv);
186
187 iwl4965_rx_init(priv, rxq);
188
189 spin_lock_irqsave(&priv->lock, flags);
190
191 rxq->need_update = 1;
192 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
193
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 /* Allocate or reset and init all Tx and Command queues */
197 if (!priv->txq) {
198 ret = iwl4965_txq_ctx_alloc(priv);
199 if (ret)
200 return ret;
201 } else
202 iwl4965_txq_ctx_reset(priv);
203
204 set_bit(STATUS_INIT, &priv->status);
205
206 return 0;
207}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
218/**
219 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
220 *
221 * If there are slots in the RX queue that need to be restocked,
222 * and we have free pre-allocated buffers, fill the ranks as much
223 * as we can, pulling from rx_free.
224 *
225 * This moves the 'write' index forward to catch up with 'processed', and
226 * also updates the memory address in the firmware to reference the new
227 * target buffer.
228 */
229void iwl4965_rx_queue_restock(struct iwl_priv *priv)
230{
231 struct iwl_rx_queue *rxq = &priv->rxq;
232 struct list_head *element;
233 struct iwl_rx_mem_buffer *rxb;
234 unsigned long flags;
235
236 spin_lock_irqsave(&rxq->lock, flags);
237 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
238 /* The overwritten rxb must be a used one */
239 rxb = rxq->queue[rxq->write];
240 BUG_ON(rxb && rxb->page);
241
242 /* Get next free Rx buffer, remove from free list */
243 element = rxq->rx_free.next;
244 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
245 list_del(element);
246
247 /* Point to Rx buffer via next RBD in circular buffer */
248 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
249 rxb->page_dma);
250 rxq->queue[rxq->write] = rxb;
251 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
252 rxq->free_count--;
253 }
254 spin_unlock_irqrestore(&rxq->lock, flags);
255 /* If the pre-allocated buffer pool is dropping low, schedule to
256 * refill it */
257 if (rxq->free_count <= RX_LOW_WATERMARK)
258 queue_work(priv->workqueue, &priv->rx_replenish);
259
260
261 /* If we've added more space for the firmware to place data, tell it.
262 * Increment device's write pointer in multiples of 8. */
263 if (rxq->write_actual != (rxq->write & ~0x7)) {
264 spin_lock_irqsave(&rxq->lock, flags);
265 rxq->need_update = 1;
266 spin_unlock_irqrestore(&rxq->lock, flags);
267 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
268 }
269}
270
271/**
272 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
273 *
274 * When moving to rx_free an SKB is allocated for the slot.
275 *
276 * Also restock the Rx queue via iwl_rx_queue_restock.
277 * This is called as a scheduled work item (except for during initialization)
278 */
279static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
280{
281 struct iwl_rx_queue *rxq = &priv->rxq;
282 struct list_head *element;
283 struct iwl_rx_mem_buffer *rxb;
284 struct page *page;
285 unsigned long flags;
286 gfp_t gfp_mask = priority;
287
288 while (1) {
289 spin_lock_irqsave(&rxq->lock, flags);
290 if (list_empty(&rxq->rx_used)) {
291 spin_unlock_irqrestore(&rxq->lock, flags);
292 return;
293 }
294 spin_unlock_irqrestore(&rxq->lock, flags);
295
296 if (rxq->free_count > RX_LOW_WATERMARK)
297 gfp_mask |= __GFP_NOWARN;
298
299 if (priv->hw_params.rx_page_order > 0)
300 gfp_mask |= __GFP_COMP;
301
302 /* Alloc a new receive buffer */
303 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
304 if (!page) {
305 if (net_ratelimit())
306 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
307 "order: %d\n",
308 priv->hw_params.rx_page_order);
309
310 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
311 net_ratelimit())
312 IWL_CRIT(priv,
313 "Failed to alloc_pages with %s. "
314 "Only %u free buffers remaining.\n",
315 priority == GFP_ATOMIC ?
316 "GFP_ATOMIC" : "GFP_KERNEL",
317 rxq->free_count);
318 /* We don't reschedule replenish work here -- we will
319 * call the restock method and if it still needs
320 * more buffers it will schedule replenish */
321 return;
322 }
323
324 spin_lock_irqsave(&rxq->lock, flags);
325
326 if (list_empty(&rxq->rx_used)) {
327 spin_unlock_irqrestore(&rxq->lock, flags);
328 __free_pages(page, priv->hw_params.rx_page_order);
329 return;
330 }
331 element = rxq->rx_used.next;
332 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
333 list_del(element);
334
335 spin_unlock_irqrestore(&rxq->lock, flags);
336
337 BUG_ON(rxb->page);
338 rxb->page = page;
339 /* Get physical address of the RB */
340 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
341 PAGE_SIZE << priv->hw_params.rx_page_order,
342 PCI_DMA_FROMDEVICE);
343 /* dma address must be no more than 36 bits */
344 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
345 /* and also 256 byte aligned! */
346 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
347
348 spin_lock_irqsave(&rxq->lock, flags);
349
350 list_add_tail(&rxb->list, &rxq->rx_free);
351 rxq->free_count++;
352 priv->alloc_rxb_page++;
353
354 spin_unlock_irqrestore(&rxq->lock, flags);
355 }
356}
357
358void iwl4965_rx_replenish(struct iwl_priv *priv)
359{
360 unsigned long flags;
361
362 iwl4965_rx_allocate(priv, GFP_KERNEL);
363
364 spin_lock_irqsave(&priv->lock, flags);
365 iwl4965_rx_queue_restock(priv);
366 spin_unlock_irqrestore(&priv->lock, flags);
367}
368
369void iwl4965_rx_replenish_now(struct iwl_priv *priv)
370{
371 iwl4965_rx_allocate(priv, GFP_ATOMIC);
372
373 iwl4965_rx_queue_restock(priv);
374}
375
376/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
377 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
378 * This free routine walks the list of POOL entries and if SKB is set to
379 * non NULL it is unmapped and freed
380 */
381void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
382{
383 int i;
384 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
385 if (rxq->pool[i].page != NULL) {
386 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
387 PAGE_SIZE << priv->hw_params.rx_page_order,
388 PCI_DMA_FROMDEVICE);
389 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
390 rxq->pool[i].page = NULL;
391 }
392 }
393
394 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
395 rxq->bd_dma);
396 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
397 rxq->rb_stts, rxq->rb_stts_dma);
398 rxq->bd = NULL;
399 rxq->rb_stts = NULL;
400}
401
402int iwl4965_rxq_stop(struct iwl_priv *priv)
403{
404
405 /* stop Rx DMA */
406 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
407 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
408 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
409
410 return 0;
411}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
434static int iwl4965_calc_rssi(struct iwl_priv *priv,
435 struct iwl_rx_phy_res *rx_resp)
436{
437 /* data from PHY/DSP regarding signal strength, etc.,
438 * contents are always there, not configurable by host. */
439 struct iwl4965_rx_non_cfg_phy *ncphy =
440 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
441 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
442 >> IWL49_AGC_DB_POS;
443
444 u32 valid_antennae =
445 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
446 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
447 u8 max_rssi = 0;
448 u32 i;
449
450 /* Find max rssi among 3 possible receivers.
451 * These values are measured by the digital signal processor (DSP).
452 * They should stay fairly constant even as the signal strength varies,
453 * if the radio's automatic gain control (AGC) is working right.
454 * AGC value (see below) will provide the "interesting" info. */
455 for (i = 0; i < 3; i++)
456 if (valid_antennae & (1 << i))
457 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
458
459 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
460 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
461 max_rssi, agc);
462
463 /* dBm = max_rssi dB - agc dB - constant.
464 * Higher AGC (higher radio gain) means lower signal. */
465 return max_rssi - agc - IWL4965_RSSI_OFFSET;
466}
467
468
469static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
470{
471 u32 decrypt_out = 0;
472
473 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
474 RX_RES_STATUS_STATION_FOUND)
475 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
476 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
477
478 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
479
480 /* packet was not encrypted */
481 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
482 RX_RES_STATUS_SEC_TYPE_NONE)
483 return decrypt_out;
484
485 /* packet was encrypted with unknown alg */
486 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
487 RX_RES_STATUS_SEC_TYPE_ERR)
488 return decrypt_out;
489
490 /* decryption was not done in HW */
491 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
492 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
493 return decrypt_out;
494
495 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
496
497 case RX_RES_STATUS_SEC_TYPE_CCMP:
498 /* alg is CCM: check MIC only */
499 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
500 /* Bad MIC */
501 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
502 else
503 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
504
505 break;
506
507 case RX_RES_STATUS_SEC_TYPE_TKIP:
508 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
509 /* Bad TTAK */
510 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
511 break;
512 }
513 /* fall through if TTAK OK */
514 default:
515 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
516 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
517 else
518 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
519 break;
520 }
521
522 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
523 decrypt_in, decrypt_out);
524
525 return decrypt_out;
526}
527
528static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
529 struct ieee80211_hdr *hdr,
530 u16 len,
531 u32 ampdu_status,
532 struct iwl_rx_mem_buffer *rxb,
533 struct ieee80211_rx_status *stats)
534{
535 struct sk_buff *skb;
536 __le16 fc = hdr->frame_control;
537
538 /* We only process data packets if the interface is open */
539 if (unlikely(!priv->is_open)) {
540 IWL_DEBUG_DROP_LIMIT(priv,
541 "Dropping packet while interface is not open.\n");
542 return;
543 }
544
545 /* In case of HW accelerated crypto and bad decryption, drop */
546 if (!priv->cfg->mod_params->sw_crypto &&
547 iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
548 return;
549
550 skb = dev_alloc_skb(128);
551 if (!skb) {
552 IWL_ERR(priv, "dev_alloc_skb failed\n");
553 return;
554 }
555
556 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
557
558 iwl_legacy_update_stats(priv, false, fc, len);
559 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
560
561 ieee80211_rx(priv->hw, skb);
562 priv->alloc_rxb_page--;
563 rxb->page = NULL;
564}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
614 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.freq =
632 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
633 rx_status.band);
634 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
635 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
640 /* TSF isn't reliable. In order to allow smooth user experience,
641 * this W/A doesn't propagate it to the mac80211 */
642 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
686/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
687 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
688void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
689 struct iwl_rx_mem_buffer *rxb)
690{
691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
692 priv->_4965.last_phy_res_valid = true;
693 memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
694 sizeof(struct iwl_rx_phy_res));
695}
696
/*
 * iwl4965_get_single_channel_for_scan - fill one passive scan channel entry
 *
 * Picks a single channel on @band and writes a passive-scan descriptor into
 * @scan_ch. Returns the number of entries written: 1 on success, 0 if the
 * band is invalid or no usable channel was found.
 */
static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
					   struct ieee80211_vif *vif,
					   enum ieee80211_band band,
					   struct iwl_scan_channel *scan_ch)
{
	const struct ieee80211_supported_band *sband;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added = 0;		/* number of channel entries written (0 or 1) */
	u16 channel = 0;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband) {
		IWL_ERR(priv, "invalid band\n");
		return added;
	}

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* passive dwell must be strictly longer than active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	channel = iwl_legacy_get_single_channel_number(priv, band);
	if (channel) {
		scan_ch->channel = cpu_to_le16(channel);
		scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;
		/* NOTE(review): default gain encodings; 2.4 GHz omits the
		 * low 3 bits set for 5 GHz — confirm against HW docs */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		added++;
	} else
		IWL_ERR(priv, "no valid channel found\n");
	return added;
}
737
/*
 * iwl4965_get_channels_for_scan - build the scan command's channel list
 *
 * Walks priv->scan_request->channels, emitting one iwl_scan_channel entry
 * into the @scan_ch array for each channel on @band that is valid for this
 * device. Returns the number of entries written.
 *
 * @is_active: request active scanning (may still be forced passive per
 *	channel by regulatory/channel flags)
 * @n_probes:  number of direct-probe SSIDs; folded into each entry's type
 */
static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* passive dwell must be strictly longer than active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		/* only channels of the requested band go into this list */
		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv,
				 "Channel %d is INVALID for this band.\n",
					channel);
			continue;
		}

		/* passive wins if not requested active, or the channel
		 * itself forbids active scanning */
		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
817
/*
 * iwl4965_request_scan - build and send a REPLY_SCAN_CMD to the uCode
 *
 * Assembles the full scan command (timing, direct-probe SSIDs, TX rate,
 * RX chain selection, probe request payload, per-channel list) into the
 * lazily allocated priv->scan_cmd buffer and sends it synchronously.
 *
 * Returns 0 on success, -ENOMEM if the command buffer cannot be
 * allocated, -EIO on an invalid band or an empty channel list, or the
 * error from iwl_legacy_send_cmd_sync. Must be called with priv->mutex
 * held. STATUS_SCAN_HW is set before sending and cleared on failure.
 */
int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl_scan_cmd *scan;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;	/* count of non-wildcard direct-scan SSIDs */
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	/* Allocate the scan command buffer once; it is reused (and kept)
	 * across scans. */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_legacy_is_any_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		if (priv->is_internal_short_scan)
			interval = 0;
		else
			interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* suspend_time is encoded as beacon intervals (<<22) plus a
		 * remainder in uSec-like units (*1024) */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	/* Fill in the direct-scan SSID list; an internal short scan and a
	 * request without SSIDs both stay passive. */
	if (priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
	} else if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick the probe TX rate for the band; CCK 1M on 2.4 GHz unless the
	 * BSS context runs in pure-40MHz mode (no CCK there). */
	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	/* per-band antenna overrides from the device config, if any */
	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	if (priv->cfg->scan_tx_antennas[band])
		scan_tx_antennas = priv->cfg->scan_tx_antennas[band];

	/* rotate the TX antenna between scans to even out wear/coverage */
	priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
						priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = iwl4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);
	/* Build the probe request that the uCode transmits on each active
	 * channel; internal scans use the broadcast address as a valid
	 * placeholder that is never sent. */
	if (!priv->is_internal_short_scan) {
		cmd_len = iwl_legacy_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	} else {
		/* use bcast addr, will not be transmitted but must be valid */
		cmd_len = iwl_legacy_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					iwlegacy_bcast_addr, NULL, 0,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));

	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	/* Channel entries follow the probe request inside scan->data */
	if (priv->is_internal_short_scan) {
		scan->channel_count =
			iwl4965_get_single_channel_for_scan(priv, vif, band,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	} else {
		scan->channel_count =
			iwl4965_get_channels_for_scan(priv, vif, band,
				is_active, n_probes,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	}
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Final command length: header + probe request + channel list */
	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);

	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		clear_bit(STATUS_SCAN_HW, &priv->status);

	return ret;
}
1037
1038int iwl4965_manage_ibss_station(struct iwl_priv *priv,
1039 struct ieee80211_vif *vif, bool add)
1040{
1041 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1042
1043 if (add)
1044 return iwl4965_add_bssid_station(priv, vif_priv->ctx,
1045 vif->bss_conf.bssid,
1046 &vif_priv->ibss_bssid_sta_id);
1047 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1048 vif->bss_conf.bssid);
1049}
1050
1051void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
1052 int sta_id, int tid, int freed)
1053{
1054 lockdep_assert_held(&priv->sta_lock);
1055
1056 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1057 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1058 else {
1059 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1060 priv->stations[sta_id].tid[tid].tfds_in_queue,
1061 freed);
1062 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1063 }
1064}
1065
1066#define IWL_TX_QUEUE_MSK 0xfffff
1067
1068static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
1069{
1070 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1071 priv->current_ht_config.single_chain_sufficient;
1072}
1073
1074#define IWL_NUM_RX_CHAINS_MULTIPLE 3
1075#define IWL_NUM_RX_CHAINS_SINGLE 2
1076#define IWL_NUM_IDLE_CHAINS_DUAL 2
1077#define IWL_NUM_IDLE_CHAINS_SINGLE 1
1078
1079/*
1080 * Determine how many receiver/antenna chains to use.
1081 *
1082 * More provides better reception via diversity. Fewer saves power
1083 * at the expense of throughput, but only when not in powersave to
1084 * start with.
1085 *
1086 * MIMO (dual stream) requires at least 2, but works better with 3.
1087 * This does not determine *which* chains to use, just how many.
1088 */
1089static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1090{
1091 /* # of Rx chains to use when expecting MIMO. */
1092 if (iwl4965_is_single_rx_stream(priv))
1093 return IWL_NUM_RX_CHAINS_SINGLE;
1094 else
1095 return IWL_NUM_RX_CHAINS_MULTIPLE;
1096}
1097
1098/*
1099 * When we are in power saving mode, unless device support spatial
1100 * multiplexing power save, use the active count for rx chain count.
1101 */
1102static int
1103iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1104{
1105 /* # Rx chains when idling, depending on SMPS mode */
1106 switch (priv->current_ht_config.smps) {
1107 case IEEE80211_SMPS_STATIC:
1108 case IEEE80211_SMPS_DYNAMIC:
1109 return IWL_NUM_IDLE_CHAINS_SINGLE;
1110 case IEEE80211_SMPS_OFF:
1111 return active_cnt;
1112 default:
1113 WARN(1, "invalid SMPS mode %d",
1114 priv->current_ht_config.smps);
1115 return active_cnt;
1116 }
1117}
1118
1119/* up to 4 chains */
1120static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1121{
1122 u8 res;
1123 res = (chain_bitmap & BIT(0)) >> 0;
1124 res += (chain_bitmap & BIT(1)) >> 1;
1125 res += (chain_bitmap & BIT(2)) >> 2;
1126 res += (chain_bitmap & BIT(3)) >> 3;
1127 return res;
1128}
1129
/**
 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = iwl4965_is_single_rx_stream(priv);
	/* CAM = "continuously aware mode", i.e. not in power-save */
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl4965_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	/* force MIMO only when >1 active chain, MIMO possible, and awake */
	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	/* NOTE(review): rx_chain below is __le16 printed raw with %X —
	 * byte-swapped on big-endian hosts; debug-only cosmetic issue */
	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1187
1188u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1189{
1190 int i;
1191 u8 ind = ant;
1192
1193 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1194 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1195 if (valid & BIT(ind))
1196 return ind;
1197 }
1198 return ant;
1199}
1200
/* Map an FH (flow handler) register address to its symbolic name for
 * debug output; each IWL_CMD() expands to a case returning the name.
 * Unknown addresses yield "UNKNOWN". */
static const char *iwl4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1217
/*
 * iwl4965_dump_fh - dump the flow-handler register set
 *
 * With debug support compiled in and @display set, formats the registers
 * into a kmalloc'd buffer returned via *@buf (caller frees) and returns
 * the number of bytes written, or -ENOMEM. Otherwise logs each register
 * via IWL_ERR and returns 0.
 */
int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		/* 48 bytes per register line plus header slack */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				iwl4965_get_fh_string(fh_tbl[i]),
				iwl_legacy_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, "  %34s: 0X%08x\n",
			iwl4965_get_fh_string(fh_tbl[i]),
			iwl_legacy_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644
index 000000000000..31ac672b64e1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -0,0 +1,2870 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <linux/wireless.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-dev.h"
40#include "iwl-sta.h"
41#include "iwl-core.h"
42#include "iwl-4965.h"
43
44#define IWL4965_RS_NAME "iwl-4965-rs"
45
46#define NUM_TRY_BEFORE_ANT_TOGGLE 1
47#define IWL_NUMBER_TRY 1
48#define IWL_HT_NUMBER_TRY 3
49
50#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
51#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
52#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
53
54/* max allowed rate miss before sync LQ cmd */
55#define IWL_MISSED_RATE_MAX 15
56/* max time to accum history 2 seconds */
57#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
58
/* Fallback mapping from HT rate index to a legacy OFDM rate index;
 * the low entries all collapse onto 6M (no slower OFDM rate exists).
 * NOTE(review): index semantics defined by IWL_RATE_*_INDEX enum in
 * project headers — confirm ordering there. */
static u8 rs_ht_to_legacy[] = {
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
};
68
/* Next antenna combination to try when toggling, indexed by the current
 * ANT_* bitmask (see the per-entry "from -> to" comments). */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
79
/* Build one iwlegacy_rates[] entry: PLCP codes (legacy/SISO/MIMO2), the
 * IEEE rate value, and the prev/next rate indices used for rate scaling
 * (see the parameter-order comment below the macro). */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
91
/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
/* Master rate table: CCK (1-11M) then OFDM/HT (6-60M); entries are
 * generated by IWL_DECLARE_RATE_INFO above. */
const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
};
115
/*
 * iwl4965_hwrate_to_plcp_idx - map a hardware rate_n_flags to a rate index
 *
 * For HT rates, derives the index arithmetically from the MCS in the low
 * byte (folding MIMO2 codes onto the SISO range, skipping 9M which has no
 * HT equivalent). For legacy rates, linearly searches iwlegacy_rates[] for
 * a matching PLCP code. Returns the rate index, or -1 if no match.
 */
static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		/* MIMO2 PLCP codes sit above SISO; fold them down */
		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
			if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
143
144static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
145 struct sk_buff *skb,
146 struct ieee80211_sta *sta,
147 struct iwl_lq_sta *lq_sta);
148static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
149 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
150static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
151 bool force_search);
152
153#ifdef CONFIG_MAC80211_DEBUGFS
154static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
155 u32 *rate_n_flags, int index);
156#else
157static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
158 u32 *rate_n_flags, int index)
159{}
160#endif
161
/**
 * The following tables contain the expected throughput metrics for all rates
 *
 *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
 *
 * where invalid entries are zeros.
 *
 * CCK rates are only valid in legacy table and will only be used in G
 * (2.4 GHz) band.
 */

/* Legacy (non-HT) expected throughput; includes the four CCK rates. */
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};

/* SISO / 20 MHz: rows are Norm, SGI, AGG, AGG+SGI (see trailing tags). */
static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
	{0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
	{0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
};

/* SISO / 40 MHz. */
static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
	{0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
};

/* MIMO2 / 20 MHz. */
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
	{0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
};

/* MIMO2 / 40 MHz. */
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
};

/* Human-readable rate descriptions for debugfs: {mbps, modulation/coding}. */
/* mbps, mcs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
	{  "1", "BPSK DSSS"},
	{  "2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{ "11", "QPSK CCK"},
	{  "6", "BPSK 1/2"},
	{  "9", "BPSK 1/2"},
	{ "12", "QPSK 1/2"},
	{ "18", "QPSK 3/4"},
	{ "24", "16QAM 1/2"},
	{ "36", "16QAM 3/4"},
	{ "48", "64QAM 2/3"},
	{ "54", "64QAM 3/4"},
	{ "60", "64QAM 5/6"},
};
221
222#define MCS_INDEX_PER_STREAM (8)
223
224static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
225{
226 return (u8)(rate_n_flags & 0xFF);
227}
228
/* Reset one rate's success-history sliding window: clear the bitmap and
 * counters, and mark ratio/throughput as IWL_INVALID_VALUE (unknown). */
static void
iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
{
	window->data = 0;
	window->success_counter = 0;
	window->success_ratio = IWL_INVALID_VALUE;
	window->counter = 0;
	window->average_tpt = IWL_INVALID_VALUE;
	window->stamp = 0;
}
239
240static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
241{
242 return (ant_type & valid_antenna) == ant_type;
243}
244
/*
 * removes the old data from the statistics. All data that is older than
 * TID_MAX_TIME_DIFF, will be deleted.
 *
 * The traffic load is a ring buffer of per-cell packet counts; this
 * advances the head past every cell whose timestamp predates the cutoff,
 * subtracting its count from the running total.
 */
static void
iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	while (tl->queue_count &&
	       (tl->time_stamp < oldest_time)) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		/* each cell covers TID_QUEUE_CELL_SPACING ms */
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;
	}
}
266
/*
 * increment traffic load value for tid and also remove
 * any old values if passed the certain time period
 *
 * Returns the frame's TID on success, or MAX_TID_COUNT for non-QoS
 * frames, out-of-range TIDs, and the very first packet (which only
 * initializes the load ring).
 */
static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
				   struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;
	u8 tid;

	/* only QoS-data frames carry a TID */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return MAX_TID_COUNT;

	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
		return MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* quantize the timestamp to the ring's cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		iwl4965_rs_tl_rm_old_stats(tl, curr_time);

	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[index] = tl->packet_count[index] + 1;
	tl->total = tl->total + 1;

	if ((index + 1) > tl->queue_count)
		tl->queue_count = index + 1;

	return tid;
}
320
/*
 * get the traffic load value for tid
 *
 * Ages out stale cells first, then returns the total packet count still
 * recorded in the window; 0 for an out-of-range or untracked tid.
 */
static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;

	if (tid >= TID_MAX_LOAD_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* quantize to the ring's cell granularity, as in tl_add_packet */
	curr_time -= curr_time % TID_ROUND_VALUE;

	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		iwl4965_rs_tl_rm_old_stats(tl, curr_time);

	return tl->total;
}
351
352static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
353 struct iwl_lq_sta *lq_data, u8 tid,
354 struct ieee80211_sta *sta)
355{
356 int ret = -EAGAIN;
357 u32 load;
358
359 load = iwl4965_rs_tl_get_load(lq_data, tid);
360
361 if (load > IWL_AGG_LOAD_THRESHOLD) {
362 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
363 sta->addr, tid);
364 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
365 if (ret == -EAGAIN) {
366 /*
367 * driver and mac80211 is out of sync
368 * this might be cause by reloading firmware
369 * stop the tx ba session here
370 */
371 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
372 tid);
373 ieee80211_stop_tx_ba_session(sta, tid);
374 }
375 } else {
376 IWL_ERR(priv, "Aggregation not enabled for tid %d "
377 "because load = %u\n", tid, load);
378 }
379 return ret;
380}
381
382static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
383 struct iwl_lq_sta *lq_data,
384 struct ieee80211_sta *sta)
385{
386 if (tid < TID_MAX_LOAD_COUNT)
387 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
388 else
389 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
390 tid, TID_MAX_LOAD_COUNT);
391}
392
393static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
394{
395 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
398}
399
400/*
401 * Static function to get the expected throughput from an iwl_scale_tbl_info
402 * that wraps a NULL pointer check
403 */
404static s32
405iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_index];
409 return 0;
410}
411
412/**
413 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
414 *
415 * We keep a sliding window of the last 62 packets transmitted
416 * at this rate. window->data contains the bitmask of successful
417 * packets.
418 */
419static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
420 int scale_index, int attempts, int successes)
421{
422 struct iwl_rate_scale_data *window = NULL;
423 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
424 s32 fail_count, tpt;
425
426 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
427 return -EINVAL;
428
429 /* Select window for current tx bit rate */
430 window = &(tbl->win[scale_index]);
431
432 /* Get expected throughput */
433 tpt = iwl4965_get_expected_tpt(tbl, scale_index);
434
435 /*
436 * Keep track of only the latest 62 tx frame attempts in this rate's
437 * history window; anything older isn't really relevant any more.
438 * If we have filled up the sliding window, drop the oldest attempt;
439 * if the oldest attempt (highest bit in bitmap) shows "success",
440 * subtract "1" from the success counter (this is the main reason
441 * we keep these bitmaps!).
442 */
443 while (attempts > 0) {
444 if (window->counter >= IWL_RATE_MAX_WINDOW) {
445
446 /* remove earliest */
447 window->counter = IWL_RATE_MAX_WINDOW - 1;
448
449 if (window->data & mask) {
450 window->data &= ~mask;
451 window->success_counter--;
452 }
453 }
454
455 /* Increment frames-attempted counter */
456 window->counter++;
457
458 /* Shift bitmap by one frame to throw away oldest history */
459 window->data <<= 1;
460
461 /* Mark the most recent #successes attempts as successful */
462 if (successes > 0) {
463 window->success_counter++;
464 window->data |= 0x1;
465 successes--;
466 }
467
468 attempts--;
469 }
470
471 /* Calculate current success ratio, avoid divide-by-0! */
472 if (window->counter > 0)
473 window->success_ratio = 128 * (100 * window->success_counter)
474 / window->counter;
475 else
476 window->success_ratio = IWL_INVALID_VALUE;
477
478 fail_count = window->counter - window->success_counter;
479
480 /* Calculate average throughput, if we have enough history. */
481 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
482 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
483 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
484 else
485 window->average_tpt = IWL_INVALID_VALUE;
486
487 /* Tag this window as having been updated */
488 window->stamp = jiffies;
489
490 return 0;
491}
492
493/*
494 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
495 */
496static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
497 struct iwl_scale_tbl_info *tbl,
498 int index, u8 use_green)
499{
500 u32 rate_n_flags = 0;
501
502 if (is_legacy(tbl->lq_type)) {
503 rate_n_flags = iwlegacy_rates[index].plcp;
504 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
505 rate_n_flags |= RATE_MCS_CCK_MSK;
506
507 } else if (is_Ht(tbl->lq_type)) {
508 if (index > IWL_LAST_OFDM_RATE) {
509 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
510 index = IWL_LAST_OFDM_RATE;
511 }
512 rate_n_flags = RATE_MCS_HT_MSK;
513
514 if (is_siso(tbl->lq_type))
515 rate_n_flags |= iwlegacy_rates[index].plcp_siso;
516 else
517 rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
518 } else {
519 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
520 }
521
522 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
523 RATE_MCS_ANT_ABC_MSK);
524
525 if (is_Ht(tbl->lq_type)) {
526 if (tbl->is_ht40) {
527 if (tbl->is_dup)
528 rate_n_flags |= RATE_MCS_DUP_MSK;
529 else
530 rate_n_flags |= RATE_MCS_HT40_MSK;
531 }
532 if (tbl->is_SGI)
533 rate_n_flags |= RATE_MCS_SGI_MSK;
534
535 if (use_green) {
536 rate_n_flags |= RATE_MCS_GF_MSK;
537 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
538 rate_n_flags &= ~RATE_MCS_SGI_MSK;
539 IWL_ERR(priv, "GF was set with SGI:SISO\n");
540 }
541 }
542 }
543 return rate_n_flags;
544}
545
546/*
547 * Interpret uCode API's rate_n_flags format,
548 * fill "search" or "active" tx mode table.
549 */
550static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
551 enum ieee80211_band band,
552 struct iwl_scale_tbl_info *tbl,
553 int *rate_idx)
554{
555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
556 u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
557 u8 mcs;
558
559 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
560 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
561
562 if (*rate_idx == IWL_RATE_INVALID) {
563 *rate_idx = -1;
564 return -EINVAL;
565 }
566 tbl->is_SGI = 0; /* default legacy setup */
567 tbl->is_ht40 = 0;
568 tbl->is_dup = 0;
569 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
570 tbl->lq_type = LQ_NONE;
571 tbl->max_search = IWL_MAX_SEARCH;
572
573 /* legacy rate format */
574 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
575 if (iwl4965_num_of_ant == 1) {
576 if (band == IEEE80211_BAND_5GHZ)
577 tbl->lq_type = LQ_A;
578 else
579 tbl->lq_type = LQ_G;
580 }
581 /* HT rate format */
582 } else {
583 if (rate_n_flags & RATE_MCS_SGI_MSK)
584 tbl->is_SGI = 1;
585
586 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
587 (rate_n_flags & RATE_MCS_DUP_MSK))
588 tbl->is_ht40 = 1;
589
590 if (rate_n_flags & RATE_MCS_DUP_MSK)
591 tbl->is_dup = 1;
592
593 mcs = iwl4965_rs_extract_rate(rate_n_flags);
594
595 /* SISO */
596 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
597 if (iwl4965_num_of_ant == 1)
598 tbl->lq_type = LQ_SISO; /*else NONE*/
599 /* MIMO2 */
600 } else {
601 if (iwl4965_num_of_ant == 2)
602 tbl->lq_type = LQ_MIMO2;
603 }
604 }
605 return 0;
606}
607
608/* switch to another antenna/antennas and return 1 */
609/* if no other valid antenna found, return 0 */
610static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
611 struct iwl_scale_tbl_info *tbl)
612{
613 u8 new_ant_type;
614
615 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
616 return 0;
617
618 if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
619 return 0;
620
621 new_ant_type = ant_toggle_lookup[tbl->ant_type];
622
623 while ((new_ant_type != tbl->ant_type) &&
624 !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
625 new_ant_type = ant_toggle_lookup[new_ant_type];
626
627 if (new_ant_type == tbl->ant_type)
628 return 0;
629
630 tbl->ant_type = new_ant_type;
631 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
632 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
633 return 1;
634}
635
636/**
637 * Green-field mode is valid if the station supports it and
638 * there are no non-GF stations present in the BSS.
639 */
640static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
641{
642 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
643 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
644
645 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
646 !(ctx->ht.non_gf_sta_present);
647}
648
649/**
650 * iwl4965_rs_get_supported_rates - get the available rates
651 *
652 * if management frame or broadcast frame only return
653 * basic available rates.
654 *
655 */
656static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
657 struct ieee80211_hdr *hdr,
658 enum iwl_table_type rate_type)
659{
660 if (is_legacy(rate_type)) {
661 return lq_sta->active_legacy_rate;
662 } else {
663 if (is_siso(rate_type))
664 return lq_sta->active_siso_rate;
665 else
666 return lq_sta->active_mimo2_rate;
667 }
668}
669
670static u16
671iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
672 int rate_type)
673{
674 u8 high = IWL_RATE_INVALID;
675 u8 low = IWL_RATE_INVALID;
676
677 /* 802.11A or ht walks to the next literal adjacent rate in
678 * the rate table */
679 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
680 int i;
681 u32 mask;
682
683 /* Find the previous rate that is in the rate mask */
684 i = index - 1;
685 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
686 if (rate_mask & mask) {
687 low = i;
688 break;
689 }
690 }
691
692 /* Find the next rate that is in the rate mask */
693 i = index + 1;
694 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
695 if (rate_mask & mask) {
696 high = i;
697 break;
698 }
699 }
700
701 return (high << 8) | low;
702 }
703
704 low = index;
705 while (low != IWL_RATE_INVALID) {
706 low = iwlegacy_rates[low].prev_rs;
707 if (low == IWL_RATE_INVALID)
708 break;
709 if (rate_mask & (1 << low))
710 break;
711 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
712 }
713
714 high = index;
715 while (high != IWL_RATE_INVALID) {
716 high = iwlegacy_rates[high].next_rs;
717 if (high == IWL_RATE_INVALID)
718 break;
719 if (rate_mask & (1 << high))
720 break;
721 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
722 }
723
724 return (high << 8) | low;
725}
726
727static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
728 struct iwl_scale_tbl_info *tbl,
729 u8 scale_index, u8 ht_possible)
730{
731 s32 low;
732 u16 rate_mask;
733 u16 high_low;
734 u8 switch_to_legacy = 0;
735 u8 is_green = lq_sta->is_green;
736 struct iwl_priv *priv = lq_sta->drv;
737
738 /* check if we need to switch from HT to legacy rates.
739 * assumption is that mandatory rates (1Mbps or 6Mbps)
740 * are always supported (spec demand) */
741 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
742 switch_to_legacy = 1;
743 scale_index = rs_ht_to_legacy[scale_index];
744 if (lq_sta->band == IEEE80211_BAND_5GHZ)
745 tbl->lq_type = LQ_A;
746 else
747 tbl->lq_type = LQ_G;
748
749 if (iwl4965_num_of_ant(tbl->ant_type) > 1)
750 tbl->ant_type =
751 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
752
753 tbl->is_ht40 = 0;
754 tbl->is_SGI = 0;
755 tbl->max_search = IWL_MAX_SEARCH;
756 }
757
758 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
759
760 /* Mask with station rate restriction */
761 if (is_legacy(tbl->lq_type)) {
762 /* supp_rates has no CCK bits in A mode */
763 if (lq_sta->band == IEEE80211_BAND_5GHZ)
764 rate_mask = (u16)(rate_mask &
765 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
766 else
767 rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
768 }
769
770 /* If we switched from HT to legacy, check current rate */
771 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
772 low = scale_index;
773 goto out;
774 }
775
776 high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
777 scale_index, rate_mask,
778 tbl->lq_type);
779 low = high_low & 0xff;
780
781 if (low == IWL_RATE_INVALID)
782 low = scale_index;
783
784out:
785 return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
786}
787
788/*
789 * Simple function to compare two rate scale table types
790 */
791static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
792 struct iwl_scale_tbl_info *b)
793{
794 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
795 (a->is_SGI == b->is_SGI);
796}
797
798/*
799 * mac80211 sends us Tx status
800 */
/*
 * Process a Tx-status notification from mac80211: validate that the
 * reported frame was sent at the rate of our latest Link Quality command,
 * fold the success/failure results into the matching rate-scale window,
 * and kick the rate-scaling algorithm to look for a better rate.
 */
static void
iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
		     struct ieee80211_sta *sta, void *priv_sta,
		     struct sk_buff *skb)
{
	int legacy_success;
	int retries;
	int rs_index, mac_index, i;
	struct iwl_lq_sta *lq_sta = priv_sta;
	struct iwl_link_quality_cmd *table;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	enum mac80211_rate_control_flags mac_flags;
	u32 tx_rate;
	struct iwl_scale_tbl_info tbl_type;
	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_rxon_context *ctx = sta_priv->common.ctx;

	IWL_DEBUG_RATE_LIMIT(priv,
		"get frame ack response, update rate scale window\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (!lq_sta) {
		IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
		return;
	} else if (!lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		return;
	}

	/* Only ACKed data frames carry usable rate-scale feedback */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    info->flags & IEEE80211_TX_CTL_NO_ACK)
		return;

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/*
	 * Ignore this Tx frame response if its initial rate doesn't match
	 * that of latest Link Quality command. There may be stragglers
	 * from a previous Link Quality command, but we're no longer interested
	 * in those; they're either from the "active" mode while we're trying
	 * to check "search" mode, or a prior "search" mode after we've moved
	 * to a new "search" mode (which might become the new "active" mode).
	 */
	table = &lq_sta->lq;
	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
	iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
			 priv->band, &tbl_type, &rs_index);
	if (priv->band == IEEE80211_BAND_5GHZ)
		rs_index -= IWL_FIRST_OFDM_RATE;
	mac_flags = info->status.rates[0].flags;
	mac_index = info->status.rates[0].idx;
	/* For HT packets, map MCS to PLCP */
	if (mac_flags & IEEE80211_TX_RC_MCS) {
		mac_index &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
		/* NOTE(review): skip over the 9M slot — presumably because
		 * HT has no 9 Mbps equivalent; confirm against rate table */
		if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
			mac_index++;
		/*
		 * mac80211 HT index is always zero-indexed; we need to move
		 * HT OFDM rates after CCK rates in 2.4 GHz band
		 */
		if (priv->band == IEEE80211_BAND_2GHZ)
			mac_index += IWL_FIRST_OFDM_RATE;
	}
	/* Here we actually compare this rate to the latest LQ command */
	if ((mac_index < 0) ||
	    (tbl_type.is_SGI !=
			!!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
	    (tbl_type.is_ht40 !=
			!!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
	    (tbl_type.is_dup !=
			!!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
	    (tbl_type.ant_type != info->antenna_sel_tx) ||
	    (!!(tx_rate & RATE_MCS_HT_MSK) !=
			!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
	    (!!(tx_rate & RATE_MCS_GF_MSK) !=
			!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
	    (rs_index != mac_index)) {
		IWL_DEBUG_RATE(priv,
		"initial rate %d does not match %d (0x%x)\n",
			 mac_index, rs_index, tx_rate);
		/*
		 * Since rates mis-match, the last LQ command may have failed.
		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
		 * ... driver.
		 */
		lq_sta->missed_rate_counter++;
		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
			lq_sta->missed_rate_counter = 0;
			iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
					CMD_ASYNC, false);
		}
		/* Regardless, ignore this status info for outdated rate */
		return;
	} else
		/* Rate did match, so reset the missed_rate_counter */
		lq_sta->missed_rate_counter = 0;

	/* Figure out if rate scale algorithm is in active or search table */
	if (iwl4965_table_type_matches(&tbl_type,
				&(lq_sta->lq_info[lq_sta->active_tbl]))) {
		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
	} else if (iwl4965_table_type_matches(&tbl_type,
				&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	} else {
		IWL_DEBUG_RATE(priv,
			"Neither active nor search matches tx rate\n");
		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
			tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
		/*
		 * no matching table found, let's by-pass the data collection
		 * and continue to perform rate scale to find the rate table
		 */
		iwl4965_rs_stay_in_table(lq_sta, true);
		goto done;
	}

	/*
	 * Updating the frame history depends on whether packets were
	 * aggregated.
	 *
	 * For aggregation, all packets were transmitted at the same rate, the
	 * first index into rate scale table.
	 */
	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
		iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
				&rs_index);
		iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
				   info->status.ampdu_len,
				   info->status.ampdu_ack_len);

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += info->status.ampdu_ack_len;
			lq_sta->total_failed += (info->status.ampdu_len -
					info->status.ampdu_ack_len);
		}
	} else {
	/*
	 * For legacy, update frame history with for each Tx retry.
	 */
		retries = info->status.rates[0].count - 1;
		/* HW doesn't send more than 15 retries */
		retries = min(retries, 15);

		/* The last transmission may have been successful */
		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
		/* Collect data for each rate used during failed TX attempts */
		for (i = 0; i <= retries; ++i) {
			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
			iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
					&tbl_type, &rs_index);
			/*
			 * Only collect stats if retried rate is in the same RS
			 * table as active/search.
			 */
			if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
				tmp_tbl = curr_tbl;
			else if (iwl4965_table_type_matches(&tbl_type,
								 other_tbl))
				tmp_tbl = other_tbl;
			else
				continue;
			/* Only the final attempt can be a success */
			iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
					   i < retries ? 0 : legacy_success);
		}

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += legacy_success;
			lq_sta->total_failed += retries + (1 - legacy_success);
		}
	}
	/* The last TX rate is cached in lq_sta; it's set in if/else above */
	lq_sta->last_rate_n_flags = tx_rate;
done:
	/* See if there's a better rate or modulation mode to try. */
	if (sta && sta->supp_rates[sband->band])
		iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
}
996
997/*
998 * Begin a period of staying with a selected modulation mode.
999 * Set "stay_in_tbl" flag to prevent any mode switches.
1000 * Set frame tx success limits according to legacy vs. high-throughput,
1001 * and reset overall (spanning all rates) tx success history statistics.
1002 * These control how long we stay using same modulation mode before
1003 * searching for a new mode.
1004 */
1005static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1006 struct iwl_lq_sta *lq_sta)
1007{
1008 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1009 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1010 if (is_legacy) {
1011 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1012 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1013 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1014 } else {
1015 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1016 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1017 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1018 }
1019 lq_sta->table_count = 0;
1020 lq_sta->total_failed = 0;
1021 lq_sta->total_success = 0;
1022 lq_sta->flush_timer = jiffies;
1023 lq_sta->action_counter = 0;
1024}
1025
1026/*
1027 * Find correct throughput table for given mode of modulation
1028 */
1029static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1030 struct iwl_scale_tbl_info *tbl)
1031{
1032 /* Used to choose among HT tables */
1033 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1034
1035 /* Check for invalid LQ type */
1036 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1037 tbl->expected_tpt = expected_tpt_legacy;
1038 return;
1039 }
1040
1041 /* Legacy rates have only one table */
1042 if (is_legacy(tbl->lq_type)) {
1043 tbl->expected_tpt = expected_tpt_legacy;
1044 return;
1045 }
1046
1047 /* Choose among many HT tables depending on number of streams
1048 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1049 * status */
1050 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1051 ht_tbl_pointer = expected_tpt_siso20MHz;
1052 else if (is_siso(tbl->lq_type))
1053 ht_tbl_pointer = expected_tpt_siso40MHz;
1054 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1055 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1056 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1057 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1058
1059 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1060 tbl->expected_tpt = ht_tbl_pointer[0];
1061 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1062 tbl->expected_tpt = ht_tbl_pointer[1];
1063 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1064 tbl->expected_tpt = ht_tbl_pointer[2];
1065 else /* AGG+SGI */
1066 tbl->expected_tpt = ht_tbl_pointer[3];
1067}
1068
1069/*
1070 * Find starting rate for new "search" high-throughput mode of modulation.
1071 * Goal is to find lowest expected rate (under perfect conditions) that is
1072 * above the current measured throughput of "active" mode, to give new mode
1073 * a fair chance to prove itself without too many challenges.
1074 *
1075 * This gets called when transitioning to more aggressive modulation
1076 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1077 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1078 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1079 * bit rate will typically need to increase, but not if performance was bad.
1080 */
static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
				    struct iwl_lq_sta *lq_sta,
				    struct iwl_scale_tbl_info *tbl,	/* "search" */
				    u16 rate_mask, s8 index)
{
	/* "active" values */
	struct iwl_scale_tbl_info *active_tbl =
	    &(lq_sta->lq_info[lq_sta->active_tbl]);
	s32 active_sr = active_tbl->win[index].success_ratio;
	s32 active_tpt = active_tbl->expected_tpt[index];

	/* expected "search" throughput */
	s32 *tpt_tbl = tbl->expected_tpt;

	s32 new_rate, high, low, start_hi;
	u16 high_low;
	s8 rate = index;

	/* All sentinels start invalid; new_rate holds the best candidate */
	new_rate = high = low = start_hi = IWL_RATE_INVALID;

	/* Walk up/down the rate ladder until the candidate brackets the
	 * "active" throughput; the loop terminates when the ladder runs
	 * out in either direction or the search direction reverses. */
	for (; ;) {
		high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
						tbl->lq_type);

		low = high_low & 0xff;
		high = (high_low >> 8) & 0xff;

		/*
		 * Lower the "search" bit rate, to give new "search" mode
		 * approximately the same throughput as "active" if:
		 *
		 * 1) "Active" mode has been working modestly well (but not
		 *    great), and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above the actual
		 *    measured "active" throughput (but less than expected
		 *    "active" throughput under perfect conditions).
		 * OR
		 * 2) "Active" mode has been working perfectly or very well
		 *    and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above expected
		 *    "active" throughput (under perfect conditions).
		 */
		if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
		     ((active_sr > IWL_RATE_DECREASE_TH) &&
		      (active_sr <= IWL_RATE_HIGH_TH) &&
		      (tpt_tbl[rate] <= active_tpt))) ||
		    ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
		     (tpt_tbl[rate] > active_tpt))) {

			/* (2nd or later pass)
			 * If we've already tried to raise the rate, and are
			 * now trying to lower it, use the higher rate. */
			if (start_hi != IWL_RATE_INVALID) {
				new_rate = start_hi;
				break;
			}

			new_rate = rate;

			/* Loop again with lower rate */
			if (low != IWL_RATE_INVALID)
				rate = low;

			/* Lower rate not available, use the original */
			else
				break;

			/* Else try to raise the "search" rate to match "active" */
		} else {
			/* (2nd or later pass)
			 * If we've already tried to lower the rate, and are
			 * now trying to raise it, use the lower rate. */
			if (new_rate != IWL_RATE_INVALID)
				break;

			/* Loop again with higher rate */
			else if (high != IWL_RATE_INVALID) {
				start_hi = high;
				rate = high;

				/* Higher rate not available, use the original */
			} else {
				new_rate = rate;
				break;
			}
		}
	}

	return new_rate;
}
1171
1172/*
1173 * Set up search table for MIMO2
1174 */
1175static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1176 struct iwl_lq_sta *lq_sta,
1177 struct ieee80211_conf *conf,
1178 struct ieee80211_sta *sta,
1179 struct iwl_scale_tbl_info *tbl, int index)
1180{
1181 u16 rate_mask;
1182 s32 rate;
1183 s8 is_green = lq_sta->is_green;
1184 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1185 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1186
1187 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1188 return -1;
1189
1190 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1191 == WLAN_HT_CAP_SM_PS_STATIC)
1192 return -1;
1193
1194 /* Need both Tx chains/antennas to support MIMO */
1195 if (priv->hw_params.tx_chains_num < 2)
1196 return -1;
1197
1198 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1199
1200 tbl->lq_type = LQ_MIMO2;
1201 tbl->is_dup = lq_sta->is_dup;
1202 tbl->action = 0;
1203 tbl->max_search = IWL_MAX_SEARCH;
1204 rate_mask = lq_sta->active_mimo2_rate;
1205
1206 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1207 tbl->is_ht40 = 1;
1208 else
1209 tbl->is_ht40 = 0;
1210
1211 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1212
1213 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1214
1215 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1216 rate, rate_mask);
1217 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1218 IWL_DEBUG_RATE(priv,
1219 "Can't switch with index %d rate mask %x\n",
1220 rate, rate_mask);
1221 return -1;
1222 }
1223 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1224 tbl, rate, is_green);
1225
1226 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1227 tbl->current_rate, is_green);
1228 return 0;
1229}
1230
1231/*
1232 * Set up search table for SISO
1233 */
1234static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1235 struct iwl_lq_sta *lq_sta,
1236 struct ieee80211_conf *conf,
1237 struct ieee80211_sta *sta,
1238 struct iwl_scale_tbl_info *tbl, int index)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IWL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1264
1265 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1267
1268 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1270 IWL_DEBUG_RATE(priv,
1271 "can not switch with index %d rate mask %x\n",
1272 rate, rate_mask);
1273 return -1;
1274 }
1275 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1276 tbl, rate, is_green);
1277 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1278 tbl->current_rate, is_green);
1279 return 0;
1280}
1281
1282/*
1283 * Try to switch to new modulation mode from legacy
1284 */
/*
 * From a legacy rate table, cycle through the possible "search" actions
 * (toggle antenna, switch to SISO, switch to MIMO2) until one of them
 * successfully sets up the search table, or until every action in the
 * cycle has been tried once.
 */
static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
					struct iwl_lq_sta *lq_sta,
					struct ieee80211_conf *conf,
					struct ieee80211_sta *sta,
					int index)
{
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	/* Copy everything except the per-rate history windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	int ret = 0;
	u8 update_search_tbl_counter = 0;

	/* NOTE(review): the action is forced to start at SISO here, so the
	 * antenna-toggle cases below are only reached after wrap-around */
	tbl->action = IWL_LEGACY_SWITCH_SISO;

	start_action = tbl->action;
	for (; ;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_LEGACY_SWITCH_ANTENNA1:
		case IWL_LEGACY_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");

			/* Skip if the hardware lacks the chains this
			 * action would toggle to */
			if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
							tx_chains_num <= 1) ||
			    (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
							tx_chains_num <= 2))
				break;

			/* Don't change antenna if success has been great */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			/* Set up search table to try other antenna */
			memcpy(search_tbl, tbl, sz);

			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				&search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				iwl4965_rs_set_expected_tpt_table(lq_sta,
								search_tbl);
				goto out;
			}
			break;
		case IWL_LEGACY_SWITCH_SISO:
			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");

			/* Set up search table to try SISO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;
			ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
						 search_tbl, index);
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}

			break;
		case IWL_LEGACY_SWITCH_MIMO2_AB:
		case IWL_LEGACY_SWITCH_MIMO2_AC:
		case IWL_LEGACY_SWITCH_MIMO2_BC:
			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");

			/* Set up search table to try MIMO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			/* Skip antenna pairs the hardware doesn't have */
			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
						conf, sta,
						 search_tbl, index);
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}
			break;
		}
		/* Advance to the next action, wrapping around the cycle */
		tbl->action++;
		if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;

		/* Every action tried once without success: give up */
		if (tbl->action == start_action)
			break;

	}
	search_tbl->lq_type = LQ_NONE;
	return 0;

out:
	/* A search table was set up; remember to try it next */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;
	return 0;

}
1398
1399/*
1400 * Try to switch to new modulation mode from SISO
1401 */
static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
					 struct iwl_lq_sta *lq_sta,
					 struct ieee80211_conf *conf,
					 struct ieee80211_sta *sta, int index)
{
	u8 is_green = lq_sta->is_green;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* Copy size for cloning the table header; excludes the per-rate
	 * history windows (win[]) at the end of the struct. */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	start_action = tbl->action;

	/* Try each candidate switch in turn until one sets up a search
	 * table (goto out) or we come back to the starting action. */
	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_SISO_SWITCH_ANTENNA1:
		case IWL_SISO_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
			/* Skip if HW lacks the chain for this toggle. */
			if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
						tx_chains_num <= 1) ||
			    (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
						tx_chains_num <= 2))
				break;

			/* Keep the current antenna while it performs well. */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				       &search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IWL_SISO_SWITCH_MIMO2_AB:
		case IWL_SISO_SWITCH_MIMO2_AC:
		case IWL_SISO_SWITCH_MIMO2_BC:
			IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			/* Antenna pair must exist on this hardware. */
			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
						conf, sta,
						search_tbl, index);
			if (!ret)
				goto out;
			break;
		case IWL_SISO_SWITCH_GI:
			/* Only toggle guard interval if the peer advertises
			 * SGI support for the current channel width. */
			if (!tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_40))
				break;

			IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");

			memcpy(search_tbl, tbl, sz);
			if (is_green) {
				/* Greenfield + SISO must not use SGI; if it
				 * somehow is set, log and clear it below. */
				if (!tbl->is_SGI)
					break;
				else
					IWL_ERR(priv,
						"SGI was set in GF+SISO\n");
			}
			search_tbl->is_SGI = !tbl->is_SGI;
			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/* Don't abandon SGI if it is already beating the
			 * expected throughput of the NGI candidate. */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[index])
					break;
			}
			search_tbl->current_rate =
				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
						      index, is_green);
			update_search_tbl_counter = 1;
			goto out;
		}
		/* Advance to next action, wrapping to the first one. */
		tbl->action++;
		if (tbl->action > IWL_SISO_SWITCH_GI)
			tbl->action = IWL_SISO_SWITCH_ANTENNA1;

		if (tbl->action == start_action)
			break;
	}
	/* No viable switch found; mark search table unused. */
	search_tbl->lq_type = LQ_NONE;
	return 0;

 out:
	/* Candidate prepared: flag it for trial and record where the
	 * state machine should resume afterwards. */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_SISO_SWITCH_GI)
		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;
}
1520
1521/*
1522 * Try to switch to new modulation mode from MIMO2
1523 */
static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
					  struct iwl_lq_sta *lq_sta,
					  struct ieee80211_conf *conf,
					  struct ieee80211_sta *sta, int index)
{
	s8 is_green = lq_sta->is_green;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* Copy size for cloning the table header; excludes the trailing
	 * per-rate history windows (win[]). */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	start_action = tbl->action;
	/* Try each candidate switch until one sets up a search table
	 * (goto out) or we are back at the starting action. */
	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_MIMO2_SWITCH_ANTENNA1:
		case IWL_MIMO2_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");

			/* Toggling a MIMO2 pair needs a third chain. */
			if (tx_chains_num <= 2)
				break;

			/* Keep current antennas while success is high. */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				       &search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IWL_MIMO2_SWITCH_SISO_A:
		case IWL_MIMO2_SWITCH_SISO_B:
		case IWL_MIMO2_SWITCH_SISO_C:
			IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");

			/* Set up new search table for SISO */
			memcpy(search_tbl, tbl, sz);

			if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
				search_tbl->ant_type = ANT_A;
			else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
				search_tbl->ant_type = ANT_B;
			else
				search_tbl->ant_type = ANT_C;

			/* Chosen single antenna must exist on this HW. */
			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
						conf, sta,
						search_tbl, index);
			if (!ret)
				goto out;

			break;

		case IWL_MIMO2_SWITCH_GI:
			/* Only toggle guard interval if the peer supports
			 * SGI at the current channel width. */
			if (!tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_40))
				break;

			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");

			/* Set up new search table for MIMO2 */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = !tbl->is_SGI;
			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/*
			 * If active table already uses the fastest possible
			 * modulation (dual stream with short guard interval),
			 * and it's working well, there's no need to look
			 * for a better type of modulation!
			 */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[index])
					break;
			}
			search_tbl->current_rate =
				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
						      index, is_green);
			update_search_tbl_counter = 1;
			goto out;

		}
		/* Advance to the next action, wrapping to the first. */
		tbl->action++;
		if (tbl->action > IWL_MIMO2_SWITCH_GI)
			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;

		if (tbl->action == start_action)
			break;
	}
	/* No viable switch found; mark search table unused. */
	search_tbl->lq_type = LQ_NONE;
	return 0;
 out:
	/* Candidate prepared: flag it for trial and record where the
	 * state machine resumes afterwards. */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_MIMO2_SWITCH_GI)
		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;

}
1643
1644/*
1645 * Check whether we should continue using same modulation mode, or
1646 * begin search for a new mode, based on:
1647 * 1) # tx successes or failures while using this mode
1648 * 2) # times calling this function
1649 * 3) elapsed time in this mode (not used, for now)
1650 */
static void
iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
	struct iwl_scale_tbl_info *tbl;
	int i;
	int active_tbl;
	int flush_interval_passed = 0;	/* nonzero once flush timer expires */
	struct iwl_priv *priv;

	priv = lq_sta->drv;
	active_tbl = lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	/* If we've been disallowing search, see if we should now allow it */
	if (lq_sta->stay_in_tbl) {

		/* Elapsed time using current modulation mode */
		if (lq_sta->flush_timer)
			flush_interval_passed =
			time_after(jiffies,
					(unsigned long)(lq_sta->flush_timer +
					IWL_RATE_SCALE_FLUSH_INTVL));

		/*
		 * Check if we should allow search for new modulation mode.
		 * If many frames have failed or succeeded, or we've used
		 * this same modulation for a long time, allow search, and
		 * reset history stats that keep track of whether we should
		 * allow a new search.  Also (below) reset all bitmaps and
		 * stats in active history.
		 */
		if (force_search ||
		    (lq_sta->total_failed > lq_sta->max_failure_limit) ||
		    (lq_sta->total_success > lq_sta->max_success_limit) ||
		    ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
		     && (flush_interval_passed))) {
			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
				     lq_sta->total_failed,
				     lq_sta->total_success,
				     flush_interval_passed);

			/* Allow search for new mode */
			lq_sta->stay_in_tbl = 0;	/* only place reset */
			lq_sta->total_failed = 0;
			lq_sta->total_success = 0;
			lq_sta->flush_timer = 0;

		/*
		 * Else if we've used this modulation mode enough repetitions
		 * (regardless of elapsed time or success/failure), reset
		 * history bitmaps and rate-specific stats for all rates in
		 * active table.
		 */
		} else {
			lq_sta->table_count++;
			if (lq_sta->table_count >=
			    lq_sta->table_count_limit) {
				lq_sta->table_count = 0;

				IWL_DEBUG_RATE(priv,
					"LQ: stay in table clear win\n");
				for (i = 0; i < IWL_RATE_COUNT; i++)
					iwl4965_rs_rate_scale_clear_window(
						&(tbl->win[i]));
			}
		}

		/* If transitioning to allow "search", reset all history
		 * bitmaps and stats in active table (this will become the new
		 * "search" table). */
		if (!lq_sta->stay_in_tbl) {
			for (i = 0; i < IWL_RATE_COUNT; i++)
				iwl4965_rs_rate_scale_clear_window(
							&(tbl->win[i]));
		}
	}
}
1729
1730/*
1731 * setup rate table in uCode
1732 * return rate_n_flags as used in the table
1733 */
1734static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1735 struct iwl_rxon_context *ctx,
1736 struct iwl_lq_sta *lq_sta,
1737 struct iwl_scale_tbl_info *tbl,
1738 int index, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1744 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1745 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746
1747 return rate;
1748}
1749
1750/*
1751 * Do rate scaling and search for new modulation mode.
1752 */
1753static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
1754 struct sk_buff *skb,
1755 struct ieee80211_sta *sta,
1756 struct iwl_lq_sta *lq_sta)
1757{
1758 struct ieee80211_hw *hw = priv->hw;
1759 struct ieee80211_conf *conf = &hw->conf;
1760 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1761 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1762 int low = IWL_RATE_INVALID;
1763 int high = IWL_RATE_INVALID;
1764 int index;
1765 int i;
1766 struct iwl_rate_scale_data *window = NULL;
1767 int current_tpt = IWL_INVALID_VALUE;
1768 int low_tpt = IWL_INVALID_VALUE;
1769 int high_tpt = IWL_INVALID_VALUE;
1770 u32 fail_count;
1771 s8 scale_action = 0;
1772 u16 rate_mask;
1773 u8 update_lq = 0;
1774 struct iwl_scale_tbl_info *tbl, *tbl1;
1775 u16 rate_scale_index_msk = 0;
1776 u32 rate;
1777 u8 is_green = 0;
1778 u8 active_tbl = 0;
1779 u8 done_search = 0;
1780 u16 high_low;
1781 s32 sr;
1782 u8 tid = MAX_TID_COUNT;
1783 struct iwl_tid_data *tid_data;
1784 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1785 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1786
1787 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1788
1789 /* Send management frames and NO_ACK data using lowest rate. */
1790 /* TODO: this could probably be improved.. */
1791 if (!ieee80211_is_data(hdr->frame_control) ||
1792 info->flags & IEEE80211_TX_CTL_NO_ACK)
1793 return;
1794
1795 if (!sta || !lq_sta)
1796 return;
1797
1798 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1799
1800 tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
1801 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1802 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1803 if (tid_data->agg.state == IWL_AGG_OFF)
1804 lq_sta->is_agg = 0;
1805 else
1806 lq_sta->is_agg = 1;
1807 } else
1808 lq_sta->is_agg = 0;
1809
1810 /*
1811 * Select rate-scale / modulation-mode table to work with in
1812 * the rest of this function: "search" if searching for better
1813 * modulation mode, or "active" if doing rate scaling within a mode.
1814 */
1815 if (!lq_sta->search_better_tbl)
1816 active_tbl = lq_sta->active_tbl;
1817 else
1818 active_tbl = 1 - lq_sta->active_tbl;
1819
1820 tbl = &(lq_sta->lq_info[active_tbl]);
1821 if (is_legacy(tbl->lq_type))
1822 lq_sta->is_green = 0;
1823 else
1824 lq_sta->is_green = iwl4965_rs_use_green(sta);
1825 is_green = lq_sta->is_green;
1826
1827 /* current tx rate */
1828 index = lq_sta->last_txrate_idx;
1829
1830 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1831 tbl->lq_type);
1832
1833 /* rates available for this association, and for modulation mode */
1834 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1835
1836 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
1837
1838 /* mask with station rate restriction */
1839 if (is_legacy(tbl->lq_type)) {
1840 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1841 /* supp_rates has no CCK bits in A mode */
1842 rate_scale_index_msk = (u16) (rate_mask &
1843 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
1844 else
1845 rate_scale_index_msk = (u16) (rate_mask &
1846 lq_sta->supp_rates);
1847
1848 } else
1849 rate_scale_index_msk = rate_mask;
1850
1851 if (!rate_scale_index_msk)
1852 rate_scale_index_msk = rate_mask;
1853
1854 if (!((1 << index) & rate_scale_index_msk)) {
1855 IWL_ERR(priv, "Current Rate is not valid\n");
1856 if (lq_sta->search_better_tbl) {
1857 /* revert to active table if search table is not valid*/
1858 tbl->lq_type = LQ_NONE;
1859 lq_sta->search_better_tbl = 0;
1860 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1861 /* get "active" rate info */
1862 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1863 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
1864 tbl, index, is_green);
1865 }
1866 return;
1867 }
1868
1869 /* Get expected throughput table and history window for current rate */
1870 if (!tbl->expected_tpt) {
1871 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1872 return;
1873 }
1874
1875 /* force user max rate if set by user */
1876 if ((lq_sta->max_rate_idx != -1) &&
1877 (lq_sta->max_rate_idx < index)) {
1878 index = lq_sta->max_rate_idx;
1879 update_lq = 1;
1880 window = &(tbl->win[index]);
1881 goto lq_update;
1882 }
1883
1884 window = &(tbl->win[index]);
1885
1886 /*
1887 * If there is not enough history to calculate actual average
1888 * throughput, keep analyzing results of more tx frames, without
1889 * changing rate or mode (bypass most of the rest of this function).
1890 * Set up new rate table in uCode only if old rate is not supported
1891 * in current association (use new rate found above).
1892 */
1893 fail_count = window->counter - window->success_counter;
1894 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1895 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1896 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1897 "for index %d\n",
1898 window->success_counter, window->counter, index);
1899
1900 /* Can't calculate this yet; not enough history */
1901 window->average_tpt = IWL_INVALID_VALUE;
1902
1903 /* Should we stay with this modulation mode,
1904 * or search for a new one? */
1905 iwl4965_rs_stay_in_table(lq_sta, false);
1906
1907 goto out;
1908 }
1909 /* Else we have enough samples; calculate estimate of
1910 * actual average throughput */
1911 if (window->average_tpt != ((window->success_ratio *
1912 tbl->expected_tpt[index] + 64) / 128)) {
1913 IWL_ERR(priv,
1914 "expected_tpt should have been calculated by now\n");
1915 window->average_tpt = ((window->success_ratio *
1916 tbl->expected_tpt[index] + 64) / 128);
1917 }
1918
1919 /* If we are searching for better modulation mode, check success. */
1920 if (lq_sta->search_better_tbl) {
1921 /* If good success, continue using the "search" mode;
1922 * no need to send new link quality command, since we're
1923 * continuing to use the setup that we've been trying. */
1924 if (window->average_tpt > lq_sta->last_tpt) {
1925
1926 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1927 "suc=%d cur-tpt=%d old-tpt=%d\n",
1928 window->success_ratio,
1929 window->average_tpt,
1930 lq_sta->last_tpt);
1931
1932 if (!is_legacy(tbl->lq_type))
1933 lq_sta->enable_counter = 1;
1934
1935 /* Swap tables; "search" becomes "active" */
1936 lq_sta->active_tbl = active_tbl;
1937 current_tpt = window->average_tpt;
1938
1939 /* Else poor success; go back to mode in "active" table */
1940 } else {
1941
1942 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1943 "suc=%d cur-tpt=%d old-tpt=%d\n",
1944 window->success_ratio,
1945 window->average_tpt,
1946 lq_sta->last_tpt);
1947
1948 /* Nullify "search" table */
1949 tbl->lq_type = LQ_NONE;
1950
1951 /* Revert to "active" table */
1952 active_tbl = lq_sta->active_tbl;
1953 tbl = &(lq_sta->lq_info[active_tbl]);
1954
1955 /* Revert to "active" rate and throughput info */
1956 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1957 current_tpt = lq_sta->last_tpt;
1958
1959 /* Need to set up a new rate table in uCode */
1960 update_lq = 1;
1961 }
1962
1963 /* Either way, we've made a decision; modulation mode
1964 * search is done, allow rate adjustment next time. */
1965 lq_sta->search_better_tbl = 0;
1966 done_search = 1; /* Don't switch modes below! */
1967 goto lq_update;
1968 }
1969
1970 /* (Else) not in search of better modulation mode, try for better
1971 * starting rate, while staying in this mode. */
1972 high_low = iwl4965_rs_get_adjacent_rate(priv, index,
1973 rate_scale_index_msk,
1974 tbl->lq_type);
1975 low = high_low & 0xff;
1976 high = (high_low >> 8) & 0xff;
1977
1978 /* If user set max rate, dont allow higher than user constrain */
1979 if ((lq_sta->max_rate_idx != -1) &&
1980 (lq_sta->max_rate_idx < high))
1981 high = IWL_RATE_INVALID;
1982
1983 sr = window->success_ratio;
1984
1985 /* Collect measured throughputs for current and adjacent rates */
1986 current_tpt = window->average_tpt;
1987 if (low != IWL_RATE_INVALID)
1988 low_tpt = tbl->win[low].average_tpt;
1989 if (high != IWL_RATE_INVALID)
1990 high_tpt = tbl->win[high].average_tpt;
1991
1992 scale_action = 0;
1993
1994 /* Too many failures, decrease rate */
1995 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1996 IWL_DEBUG_RATE(priv,
1997 "decrease rate because of low success_ratio\n");
1998 scale_action = -1;
1999
2000 /* No throughput measured yet for adjacent rates; try increase. */
2001 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2002 (high_tpt == IWL_INVALID_VALUE)) {
2003
2004 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2005 scale_action = 1;
2006 else if (low != IWL_RATE_INVALID)
2007 scale_action = 0;
2008 }
2009
2010 /* Both adjacent throughputs are measured, but neither one has better
2011 * throughput; we're using the best rate, don't change it! */
2012 else if ((low_tpt != IWL_INVALID_VALUE) &&
2013 (high_tpt != IWL_INVALID_VALUE) &&
2014 (low_tpt < current_tpt) &&
2015 (high_tpt < current_tpt))
2016 scale_action = 0;
2017
2018 /* At least one adjacent rate's throughput is measured,
2019 * and may have better performance. */
2020 else {
2021 /* Higher adjacent rate's throughput is measured */
2022 if (high_tpt != IWL_INVALID_VALUE) {
2023 /* Higher rate has better throughput */
2024 if (high_tpt > current_tpt &&
2025 sr >= IWL_RATE_INCREASE_TH) {
2026 scale_action = 1;
2027 } else {
2028 scale_action = 0;
2029 }
2030
2031 /* Lower adjacent rate's throughput is measured */
2032 } else if (low_tpt != IWL_INVALID_VALUE) {
2033 /* Lower rate has better throughput */
2034 if (low_tpt > current_tpt) {
2035 IWL_DEBUG_RATE(priv,
2036 "decrease rate because of low tpt\n");
2037 scale_action = -1;
2038 } else if (sr >= IWL_RATE_INCREASE_TH) {
2039 scale_action = 1;
2040 }
2041 }
2042 }
2043
2044 /* Sanity check; asked for decrease, but success rate or throughput
2045 * has been good at old rate. Don't change it. */
2046 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2047 ((sr > IWL_RATE_HIGH_TH) ||
2048 (current_tpt > (100 * tbl->expected_tpt[low]))))
2049 scale_action = 0;
2050
2051 switch (scale_action) {
2052 case -1:
2053 /* Decrease starting rate, update uCode's rate table */
2054 if (low != IWL_RATE_INVALID) {
2055 update_lq = 1;
2056 index = low;
2057 }
2058
2059 break;
2060 case 1:
2061 /* Increase starting rate, update uCode's rate table */
2062 if (high != IWL_RATE_INVALID) {
2063 update_lq = 1;
2064 index = high;
2065 }
2066
2067 break;
2068 case 0:
2069 /* No change */
2070 default:
2071 break;
2072 }
2073
2074 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2075 "high %d type %d\n",
2076 index, scale_action, low, high, tbl->lq_type);
2077
2078lq_update:
2079 /* Replace uCode's rate table for the destination station. */
2080 if (update_lq)
2081 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
2082 tbl, index, is_green);
2083
2084 /* Should we stay with this modulation mode,
2085 * or search for a new one? */
2086 iwl4965_rs_stay_in_table(lq_sta, false);
2087
2088 /*
2089 * Search for new modulation mode if we're:
2090 * 1) Not changing rates right now
2091 * 2) Not just finishing up a search
2092 * 3) Allowing a new search
2093 */
2094 if (!update_lq && !done_search &&
2095 !lq_sta->stay_in_tbl && window->counter) {
2096 /* Save current throughput to compare with "search" throughput*/
2097 lq_sta->last_tpt = current_tpt;
2098
2099 /* Select a new "search" modulation mode to try.
2100 * If one is found, set up the new "search" table. */
2101 if (is_legacy(tbl->lq_type))
2102 iwl4965_rs_move_legacy_other(priv, lq_sta,
2103 conf, sta, index);
2104 else if (is_siso(tbl->lq_type))
2105 iwl4965_rs_move_siso_to_other(priv, lq_sta,
2106 conf, sta, index);
2107 else /* (is_mimo2(tbl->lq_type)) */
2108 iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
2109 conf, sta, index);
2110
2111 /* If new "search" mode was selected, set up in uCode table */
2112 if (lq_sta->search_better_tbl) {
2113 /* Access the "search" table, clear its history. */
2114 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2115 for (i = 0; i < IWL_RATE_COUNT; i++)
2116 iwl4965_rs_rate_scale_clear_window(
2117 &(tbl->win[i]));
2118
2119 /* Use new "search" start rate */
2120 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
2121
2122 IWL_DEBUG_RATE(priv,
2123 "Switch current mcs: %X index: %d\n",
2124 tbl->current_rate, index);
2125 iwl4965_rs_fill_link_cmd(priv, lq_sta,
2126 tbl->current_rate);
2127 iwl_legacy_send_lq_cmd(priv, ctx,
2128 &lq_sta->lq, CMD_ASYNC, false);
2129 } else
2130 done_search = 1;
2131 }
2132
2133 if (done_search && !lq_sta->stay_in_tbl) {
2134 /* If the "active" (non-search) mode was legacy,
2135 * and we've tried switching antennas,
2136 * but we haven't been able to try HT modes (not available),
2137 * stay with best antenna legacy modulation for a while
2138 * before next round of mode comparisons. */
2139 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2140 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2141 lq_sta->action_counter > tbl1->max_search) {
2142 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2143 iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
2144 }
2145
2146 /* If we're in an HT mode, and all 3 mode switch actions
2147 * have been tried and compared, stay in this best modulation
2148 * mode for a while before next round of mode comparisons. */
2149 if (lq_sta->enable_counter &&
2150 (lq_sta->action_counter >= tbl1->max_search)) {
2151 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2152 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2153 (tid != MAX_TID_COUNT)) {
2154 tid_data =
2155 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2156 if (tid_data->agg.state == IWL_AGG_OFF) {
2157 IWL_DEBUG_RATE(priv,
2158 "try to aggregate tid %d\n",
2159 tid);
2160 iwl4965_rs_tl_turn_on_agg(priv, tid,
2161 lq_sta, sta);
2162 }
2163 }
2164 iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
2165 }
2166 }
2167
2168out:
2169 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
2170 index, is_green);
2171 i = index;
2172 lq_sta->last_txrate_idx = i;
2173}
2174
2175/**
2176 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2177 *
2178 * The uCode's station table contains a table of fallback rates
2179 * for automatic fallback during transmission.
2180 *
2181 * NOTE: This sets up a default set of values. These will be replaced later
2182 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2183 * rc80211_simple.
2184 *
2185 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2186 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2187 * which requires station table entry to exist).
2188 */
static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
			     struct ieee80211_conf *conf,
			     struct ieee80211_sta *sta,
			     struct iwl_lq_sta *lq_sta)
{
	struct iwl_scale_tbl_info *tbl;
	int rate_idx;
	int i;
	u32 rate;
	u8 use_green = iwl4965_rs_use_green(sta);
	u8 active_tbl = 0;
	u8 valid_tx_ant;
	struct iwl_station_priv *sta_priv;
	struct iwl_rxon_context *ctx;

	if (!sta || !lq_sta)
		return;

	sta_priv = (void *)sta->drv_priv;
	ctx = sta_priv->common.ctx;

	i = lq_sta->last_txrate_idx;

	valid_tx_ant = priv->hw_params.valid_tx_ant;

	/* Work on the table currently in use ("active", or "search" when a
	 * mode search is in progress). */
	if (!lq_sta->search_better_tbl)
		active_tbl = lq_sta->active_tbl;
	else
		active_tbl = 1 - lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	/* Clamp out-of-range starting index to the lowest rate. */
	if ((i < 0) || (i >= IWL_RATE_COUNT))
		i = 0;

	/* Build initial rate_n_flags: PLCP code, first valid antenna,
	 * and the CCK flag for CCK rates. */
	rate = iwlegacy_rates[i].plcp;
	tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
	rate |= tbl->ant_type << RATE_MCS_ANT_POS;

	if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
		rate |= RATE_MCS_CCK_MSK;

	iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
	if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
		iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);

	rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
	tbl->current_rate = rate;
	iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
	/* NOTE(review): priv is deliberately passed as NULL here — confirm
	 * iwl4965_rs_fill_link_cmd tolerates a NULL priv argument. */
	iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
	priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
	/* Synchronous send: station entry must already exist in uCode
	 * (REPLY_ADD_STA run before this function — see header comment). */
	iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
}
2242
static void
iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
			struct ieee80211_tx_rate_control *txrc)
{

	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_lq_sta *lq_sta = priv_sta;
	int rate_idx;

	IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");

	/* Get max rate if user set max rate */
	if (lq_sta) {
		lq_sta->max_rate_idx = txrc->max_rate_idx;
		/* On 5 GHz the user index is relative to OFDM rates. */
		if ((sband->band == IEEE80211_BAND_5GHZ) &&
		    (lq_sta->max_rate_idx != -1))
			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
		if ((lq_sta->max_rate_idx < 0) ||
		    (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
			lq_sta->max_rate_idx = -1;
	}

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (lq_sta && !lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		priv_sta = NULL;
	}

	/* Send management frames and NO_ACK data using lowest rate. */
	/* NOTE(review): the code below relies on rate_control_send_low()
	 * returning true whenever priv_sta is NULL, so that lq_sta is
	 * non-NULL past this point — confirm against mac80211. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rate_idx  = lq_sta->last_txrate_idx;

	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
		/* Convert driver rate index to mac80211 MCS index. */
		rate_idx -= IWL_FIRST_OFDM_RATE;
		/* 6M and 9M shared same MCS index */
		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
		if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
							IWL_RATE_MIMO2_6M_PLCP)
			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
		/* Translate driver rate flags into mac80211 TX rate flags. */
		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_DUP_DATA;
		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_40_MHZ_WIDTH;
		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_GREEN_FIELD;
	} else {
		/* Check for invalid rates */
		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
				((sband->band == IEEE80211_BAND_5GHZ) &&
				(rate_idx < IWL_FIRST_OFDM_RATE)))
			rate_idx = rate_lowest_index(sband, sta);
		/* On valid 5 GHz rate, adjust index */
		else if (sband->band == IEEE80211_BAND_5GHZ)
			rate_idx -= IWL_FIRST_OFDM_RATE;
		info->control.rates[0].flags = 0;
	}
	info->control.rates[0].idx = rate_idx;

}
2314
2315static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2316 gfp_t gfp)
2317{
2318 struct iwl_lq_sta *lq_sta;
2319 struct iwl_station_priv *sta_priv =
2320 (struct iwl_station_priv *) sta->drv_priv;
2321 struct iwl_priv *priv;
2322
2323 priv = (struct iwl_priv *)priv_rate;
2324 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2325
2326 lq_sta = &sta_priv->lq_sta;
2327
2328 return lq_sta;
2329}
2330
2331/*
2332 * Called after adding a new station to initialize rate scaling
2333 */
2334void
2335iwl4965_rs_rate_init(struct iwl_priv *priv,
2336 struct ieee80211_sta *sta,
2337 u8 sta_id)
2338{
2339 int i, j;
2340 struct ieee80211_hw *hw = priv->hw;
2341 struct ieee80211_conf *conf = &priv->hw->conf;
2342 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2343 struct iwl_station_priv *sta_priv;
2344 struct iwl_lq_sta *lq_sta;
2345 struct ieee80211_supported_band *sband;
2346
2347 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2348 lq_sta = &sta_priv->lq_sta;
2349 sband = hw->wiphy->bands[conf->channel->band];
2350
2351
2352 lq_sta->lq.sta_id = sta_id;
2353
2354 for (j = 0; j < LQ_SIZE; j++)
2355 for (i = 0; i < IWL_RATE_COUNT; i++)
2356 iwl4965_rs_rate_scale_clear_window(
2357 &lq_sta->lq_info[j].win[i]);
2358
2359 lq_sta->flush_timer = 0;
2360 lq_sta->supp_rates = sta->supp_rates[sband->band];
2361 for (j = 0; j < LQ_SIZE; j++)
2362 for (i = 0; i < IWL_RATE_COUNT; i++)
2363 iwl4965_rs_rate_scale_clear_window(
2364 &lq_sta->lq_info[j].win[i]);
2365
2366 IWL_DEBUG_RATE(priv, "LQ:"
2367 "*** rate scale station global init for station %d ***\n",
2368 sta_id);
2369 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2370 * the lowest or the highest rate.. Could consider using RSSI from
2371 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2372 * after assoc.. */
2373
2374 lq_sta->is_dup = 0;
2375 lq_sta->max_rate_idx = -1;
2376 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2377 lq_sta->is_green = iwl4965_rs_use_green(sta);
2378 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2379 lq_sta->band = priv->band;
2380 /*
2381 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2382 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2383 */
2384 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2385 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2386 lq_sta->active_siso_rate &= ~((u16)0x2);
2387 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2388
2389 /* Same here */
2390 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2391 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2392 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2393 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2394
2395 /* These values will be overridden later */
2396 lq_sta->lq.general_params.single_stream_ant_msk =
2397 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2398 lq_sta->lq.general_params.dual_stream_ant_msk =
2399 priv->hw_params.valid_tx_ant &
2400 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2401 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2402 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2403 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2404 lq_sta->lq.general_params.dual_stream_ant_msk =
2405 priv->hw_params.valid_tx_ant;
2406 }
2407
2408 /* as default allow aggregation for all tids */
2409 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2410 lq_sta->drv = priv;
2411
2412 /* Set last_txrate_idx to lowest rate */
2413 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2414 if (sband->band == IEEE80211_BAND_5GHZ)
2415 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2416 lq_sta->is_agg = 0;
2417
2418#ifdef CONFIG_MAC80211_DEBUGFS
2419 lq_sta->dbg_fixed_rate = 0;
2420#endif
2421
2422 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2423}
2424
/*
 * Build the uCode link-quality command's retry-rate table.
 *
 * Fills all LINK_QUAL_MAX_RETRY_NUM entries of lq_cmd->rs_table starting
 * from @new_rate, repeating each rate IWL_NUMBER_TRY (legacy) or
 * IWL_HT_NUMBER_TRY (HT) times and then stepping down to lower rates,
 * toggling the TX antenna for legacy rates as retries accumulate.
 *
 * @priv may be NULL: the debugfs fixed-rate path calls this with NULL,
 * in which case valid_tx_ant stays 0 and antenna toggling is skipped
 * (every toggle is guarded by "if (priv)" below).
 */
static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
			struct iwl_lq_sta *lq_sta, u32 new_rate)
{
	struct iwl_scale_tbl_info tbl_type;
	int index = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (index 0) if needed for debug purposes */
	iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

	/* Interpret new_rate (rate_n_flags) */
	iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
					&tbl_type, &rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IWL_NUMBER_TRY;
	} else {
		repeat_rate = IWL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
			is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (index 0) */
	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

	if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
						tbl_type.ant_type;
	} else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk =
						tbl_type.ant_type;
	} /* otherwise we don't modify the existing value */

	index++;
	repeat_rate--;
	/* NULL priv (debugfs fixed-rate path) leaves valid_tx_ant == 0 */
	if (priv)
		valid_tx_ant = priv->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
			if (is_legacy(tbl_type.lq_type)) {
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (priv &&
					 iwl4965_rs_toggle_antenna(valid_tx_ant,
							&new_rate, &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

			/* Fill next table entry */
			lq_cmd->rs_table[index].rate_n_flags =
					cpu_to_le32(new_rate);
			repeat_rate--;
			index++;
		}

		/* Re-derive table info: new_rate may have changed above */
		iwl4965_rs_get_tbl_info_from_mcs(new_rate,
						lq_sta->band, &tbl_type,
						&rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = index;

		/* Get next rate */
		new_rate = iwl4965_rs_get_lower_rate(lq_sta,
					&tbl_type, rate_idx,
					use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (priv &&
				 iwl4965_rs_toggle_antenna(valid_tx_ant,
						&new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IWL_NUMBER_TRY;
		} else {
			repeat_rate = IWL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

		/* Fill next table entry */
		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

		index++;
		repeat_rate--;
	}

	/* Aggregation limits: frame count, disable-start threshold, and
	 * time limit are fixed defaults for every LQ command */
	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2545
2546static void
2547*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2548{
2549 return hw->priv;
2550}
/*
 * Rate-control .free hook. mac80211 requires it to be implemented, but
 * iwl4965_rs_alloc() allocated nothing, so there is nothing to release.
 */
static void iwl4965_rs_free(void *priv_rate)
{
}
2556
/*
 * Rate-control .free_sta hook. The per-station lq_sta state is embedded
 * in the station's drv_priv (see iwl4965_rs_alloc_sta), so there is no
 * memory to free here — only entry/exit tracing remains.
 */
static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
			void *priv_sta)
{
	struct iwl_priv *priv __maybe_unused = priv_r;

	IWL_DEBUG_RATE(priv, "enter\n");
	IWL_DEBUG_RATE(priv, "leave\n");
}
2565
2566
2567#ifdef CONFIG_MAC80211_DEBUGFS
/*
 * Generic debugfs open: stash the inode's private pointer (the lq_sta
 * passed to debugfs_create_file) so the read/write handlers can fetch
 * it from file->private_data. Always succeeds.
 */
static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2573static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2574 u32 *rate_n_flags, int index)
2575{
2576 struct iwl_priv *priv;
2577 u8 valid_tx_ant;
2578 u8 ant_sel_tx;
2579
2580 priv = lq_sta->drv;
2581 valid_tx_ant = priv->hw_params.valid_tx_ant;
2582 if (lq_sta->dbg_fixed_rate) {
2583 ant_sel_tx =
2584 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2585 >> RATE_MCS_ANT_POS);
2586 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2587 *rate_n_flags = lq_sta->dbg_fixed_rate;
2588 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2589 } else {
2590 lq_sta->dbg_fixed_rate = 0;
2591 IWL_ERR(priv,
2592 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2593 ant_sel_tx, valid_tx_ant);
2594 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2595 }
2596 } else {
2597 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2598 }
2599}
2600
2601static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2602 const char __user *user_buf, size_t count, loff_t *ppos)
2603{
2604 struct iwl_lq_sta *lq_sta = file->private_data;
2605 struct iwl_priv *priv;
2606 char buf[64];
2607 int buf_size;
2608 u32 parsed_rate;
2609 struct iwl_station_priv *sta_priv =
2610 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2611 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2612
2613 priv = lq_sta->drv;
2614 memset(buf, 0, sizeof(buf));
2615 buf_size = min(count, sizeof(buf) - 1);
2616 if (copy_from_user(buf, user_buf, buf_size))
2617 return -EFAULT;
2618
2619 if (sscanf(buf, "%x", &parsed_rate) == 1)
2620 lq_sta->dbg_fixed_rate = parsed_rate;
2621 else
2622 lq_sta->dbg_fixed_rate = 0;
2623
2624 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2625 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2626 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2627
2628 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
2629 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2630
2631 if (lq_sta->dbg_fixed_rate) {
2632 iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2633 iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
2634 false);
2635 }
2636
2637 return count;
2638}
2639
/*
 * debugfs read handler for "rate_scale_table": dump the station's rate
 * scaling state — active table type, antenna masks, aggregation params,
 * and the full decoded LQ retry-rate table — as human-readable text.
 *
 * NOTE(review): output is formatted into a fixed 1024-byte kmalloc
 * buffer with unchecked sprintf; assumed large enough for
 * LINK_QUAL_MAX_RETRY_NUM entries — verify against worst-case sizes.
 */
static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i = 0;
	int index = 0;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;
	struct iwl_priv *priv;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);

	priv = lq_sta->drv;
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
			lq_sta->total_failed, lq_sta->total_success,
			lq_sta->active_legacy_rate);
	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
			lq_sta->dbg_fixed_rate);
	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
	    (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
	    (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
	    (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
	desc += sprintf(buff+desc, "lq type %s\n",
	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
	/* HT-only attributes: stream count, channel width, SGI/GF/AGG */
	if (is_Ht(tbl->lq_type)) {
		desc += sprintf(buff+desc, " %s",
		   (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
		   desc += sprintf(buff+desc, " %s",
		   (tbl->is_ht40) ? "40MHz" : "20MHz");
		   desc += sprintf(buff+desc, " %s %s %s\n",
		   (tbl->is_SGI) ? "SGI" : "",
		   (lq_sta->is_green) ? "GF enabled" : "",
		   (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
		lq_sta->last_rate_n_flags);
	desc += sprintf(buff+desc, "general:"
		"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
		lq_sta->lq.general_params.flags,
		lq_sta->lq.general_params.mimo_delimiter,
		lq_sta->lq.general_params.single_stream_ant_msk,
		lq_sta->lq.general_params.dual_stream_ant_msk);

	desc += sprintf(buff+desc, "agg:"
			"time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
			le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
			lq_sta->lq.agg_params.agg_dis_start_th,
			lq_sta->lq.agg_params.agg_frame_cnt_limit);

	desc += sprintf(buff+desc,
			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
			lq_sta->lq.general_params.start_rate_index[0],
			lq_sta->lq.general_params.start_rate_index[1],
			lq_sta->lq.general_params.start_rate_index[2],
			lq_sta->lq.general_params.start_rate_index[3]);

	/* Decode each LQ retry entry back to a PLCP index for display */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		index = iwl4965_hwrate_to_plcp_idx(
			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
		if (is_legacy(tbl->lq_type)) {
			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
				i,
				le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
				iwl_rate_mcs[index].mbps);
		} else {
			desc += sprintf(buff+desc,
				" rate[%d] 0x%X %smbps (%s)\n",
				i,
				le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
				iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2723
2724static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
2725 .write = iwl4965_rs_sta_dbgfs_scale_table_write,
2726 .read = iwl4965_rs_sta_dbgfs_scale_table_read,
2727 .open = iwl4965_open_file_generic,
2728 .llseek = default_llseek,
2729};
/*
 * debugfs read handler for "rate_stats_table": for each of the LQ_SIZE
 * rate tables, dump its mode flags and the per-rate success-history
 * windows (counter / success count / success ratio). The active table
 * is marked "*", the other "x".
 *
 * NOTE(review): fixed 1024-byte buffer filled with unchecked sprintf;
 * assumed sufficient for LQ_SIZE * IWL_RATE_COUNT lines — verify.
 */
static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i, j;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		desc += sprintf(buff+desc,
				"%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
				"rate=0x%X\n",
				lq_sta->active_tbl == i ? "*" : "x",
				lq_sta->lq_info[i].lq_type,
				lq_sta->lq_info[i].is_SGI,
				lq_sta->lq_info[i].is_ht40,
				lq_sta->lq_info[i].is_dup,
				lq_sta->is_green,
				lq_sta->lq_info[i].current_rate);
		for (j = 0; j < IWL_RATE_COUNT; j++) {
			desc += sprintf(buff+desc,
				"counter=%d success=%d %%=%d\n",
				lq_sta->lq_info[i].win[j].counter,
				lq_sta->lq_info[i].win[j].success_counter,
				lq_sta->lq_info[i].win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2767
2768static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2769 .read = iwl4965_rs_sta_dbgfs_stats_table_read,
2770 .open = iwl4965_open_file_generic,
2771 .llseek = default_llseek,
2772};
2773
2774static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2775 char __user *user_buf, size_t count, loff_t *ppos)
2776{
2777 char buff[120];
2778 int desc = 0;
2779 ssize_t ret;
2780
2781 struct iwl_lq_sta *lq_sta = file->private_data;
2782 struct iwl_priv *priv;
2783 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2784
2785 priv = lq_sta->drv;
2786
2787 if (is_Ht(tbl->lq_type))
2788 desc += sprintf(buff+desc,
2789 "Bit Rate= %d Mb/s\n",
2790 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2791 else
2792 desc += sprintf(buff+desc,
2793 "Bit Rate= %d Mb/s\n",
2794 iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
2795
2796 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2797 return ret;
2798}
2799
2800static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
2801 .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
2802 .open = iwl4965_open_file_generic,
2803 .llseek = default_llseek,
2804};
2805
/*
 * Create the per-station rate-scaling debugfs entries under @dir:
 * rate_scale_table (rw), rate_stats_table (ro), rate_scale_data (ro),
 * and the tx_agg_tid_enable u8 toggle. Dentries are cached in lq_sta
 * so iwl4965_rs_remove_debugfs() can tear them down.
 */
static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
					struct dentry *dir)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	lq_sta->rs_sta_dbgfs_scale_table_file =
		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
		&lq_sta->tx_agg_tid_en);
}
2824
/*
 * Remove the four per-station debugfs entries created by
 * iwl4965_rs_add_debugfs(). debugfs_remove() tolerates NULL/error
 * dentries, so no validity checks are needed here.
 */
static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
2833#endif
2834
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it. (The real work is performed later by
 * iwl4965_rs_rate_init().)
 */
static void
iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
			struct ieee80211_sta *sta, void *priv_sta)
{
	/* intentionally empty — see comment above */
}
/* mac80211 rate-control algorithm descriptor for the 4965 device.
 * Registered/unregistered by iwl4965_rate_control_(un)register(). */
static struct rate_control_ops rs_4965_ops = {
	.module = NULL,
	.name = IWL4965_RS_NAME,
	.tx_status = iwl4965_rs_tx_status,
	.get_rate = iwl4965_rs_get_rate,
	.rate_init = iwl4965_rs_rate_init_stub,	/* no-op; see stub above */
	.alloc = iwl4965_rs_alloc,
	.free = iwl4965_rs_free,
	.alloc_sta = iwl4965_rs_alloc_sta,
	.free_sta = iwl4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = iwl4965_rs_add_debugfs,
	.remove_sta_debugfs = iwl4965_rs_remove_debugfs,
#endif
};
2860
2861int iwl4965_rate_control_register(void)
2862{
2863 pr_err("Registering 4965 rate control operations\n");
2864 return ieee80211_rate_control_register(&rs_4965_ops);
2865}
2866
/* Unregister the 4965 rate-control algorithm from mac80211 (module exit). */
void iwl4965_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_4965_ops);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
index b192ca842f0a..b9fa2f6411a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -34,14 +34,14 @@
34 34
35#include "iwl-dev.h" 35#include "iwl-dev.h"
36#include "iwl-core.h" 36#include "iwl-core.h"
37#include "iwl-agn-calib.h" 37#include "iwl-4965-calib.h"
38#include "iwl-sta.h" 38#include "iwl-sta.h"
39#include "iwl-io.h" 39#include "iwl-io.h"
40#include "iwl-helpers.h" 40#include "iwl-helpers.h"
41#include "iwl-agn-hw.h" 41#include "iwl-4965-hw.h"
42#include "iwl-agn.h" 42#include "iwl-4965.h"
43 43
44void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 44void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb) 45 struct iwl_rx_mem_buffer *rxb)
46 46
47{ 47{
@@ -58,14 +58,14 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
58 le32_to_cpu(missed_beacon->num_recvd_beacons), 58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons)); 59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status)) 60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl_init_sensitivity(priv); 61 iwl4965_init_sensitivity(priv);
62 } 62 }
63} 63}
64 64
65/* Calculate noise level, based on measurements during network silence just 65/* Calculate noise level, based on measurements during network silence just
66 * before arriving beacon. This measurement can be done only if we know 66 * before arriving beacon. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */ 67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl_rx_calc_noise(struct iwl_priv *priv) 68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{ 69{
70 struct statistics_rx_non_phy *rx_info; 70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0; 71 int num_active_rx = 0;
@@ -73,10 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c; 73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise; 74 int last_rx_noise;
75 75
76 if (iwl_bt_statistics(priv)) 76 rx_info = &(priv->_4965.statistics.rx.general);
77 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
78 else
79 rx_info = &(priv->_agn.statistics.rx.general);
80 bcn_silence_a = 77 bcn_silence_a =
81 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; 78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
82 bcn_silence_b = 79 bcn_silence_b =
@@ -108,13 +105,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
108 last_rx_noise); 105 last_rx_noise);
109} 106}
110 107
111#ifdef CONFIG_IWLWIFI_DEBUGFS 108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
112/* 109/*
113 * based on the assumption of all statistics counter are in DWORD 110 * based on the assumption of all statistics counter are in DWORD
114 * FIXME: This function is for debugging, do not deal with 111 * FIXME: This function is for debugging, do not deal with
115 * the case of counters roll-over. 112 * the case of counters roll-over.
116 */ 113 */
117static void iwl_accumulative_statistics(struct iwl_priv *priv, 114static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
118 __le32 *stats) 115 __le32 *stats)
119{ 116{
120 int i, size; 117 int i, size;
@@ -124,27 +121,16 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
124 struct statistics_general_common *general, *accum_general; 121 struct statistics_general_common *general, *accum_general;
125 struct statistics_tx *tx, *accum_tx; 122 struct statistics_tx *tx, *accum_tx;
126 123
127 if (iwl_bt_statistics(priv)) { 124 prev_stats = (__le32 *)&priv->_4965.statistics;
128 prev_stats = (__le32 *)&priv->_agn.statistics_bt; 125 accum_stats = (u32 *)&priv->_4965.accum_statistics;
129 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt; 126 size = sizeof(struct iwl_notif_statistics);
130 size = sizeof(struct iwl_bt_notif_statistics); 127 general = &priv->_4965.statistics.general.common;
131 general = &priv->_agn.statistics_bt.general.common; 128 accum_general = &priv->_4965.accum_statistics.general.common;
132 accum_general = &priv->_agn.accum_statistics_bt.general.common; 129 tx = &priv->_4965.statistics.tx;
133 tx = &priv->_agn.statistics_bt.tx; 130 accum_tx = &priv->_4965.accum_statistics.tx;
134 accum_tx = &priv->_agn.accum_statistics_bt.tx; 131 delta = (u32 *)&priv->_4965.delta_statistics;
135 delta = (u32 *)&priv->_agn.delta_statistics_bt; 132 max_delta = (u32 *)&priv->_4965.max_delta;
136 max_delta = (u32 *)&priv->_agn.max_delta_bt; 133
137 } else {
138 prev_stats = (__le32 *)&priv->_agn.statistics;
139 accum_stats = (u32 *)&priv->_agn.accum_statistics;
140 size = sizeof(struct iwl_notif_statistics);
141 general = &priv->_agn.statistics.general.common;
142 accum_general = &priv->_agn.accum_statistics.general.common;
143 tx = &priv->_agn.statistics.tx;
144 accum_tx = &priv->_agn.accum_statistics.tx;
145 delta = (u32 *)&priv->_agn.delta_statistics;
146 max_delta = (u32 *)&priv->_agn.max_delta;
147 }
148 for (i = sizeof(__le32); i < size; 134 for (i = sizeof(__le32); i < size;
149 i += sizeof(__le32), stats++, prev_stats++, delta++, 135 i += sizeof(__le32), stats++, prev_stats++, delta++,
150 max_delta++, accum_stats++) { 136 max_delta++, accum_stats++) {
@@ -159,23 +145,19 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
159 145
160 /* reset accumulative statistics for "no-counter" type statistics */ 146 /* reset accumulative statistics for "no-counter" type statistics */
161 accum_general->temperature = general->temperature; 147 accum_general->temperature = general->temperature;
162 accum_general->temperature_m = general->temperature_m;
163 accum_general->ttl_timestamp = general->ttl_timestamp; 148 accum_general->ttl_timestamp = general->ttl_timestamp;
164 accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
165 accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
166 accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
167} 149}
168#endif 150#endif
169 151
170#define REG_RECALIB_PERIOD (60) 152#define REG_RECALIB_PERIOD (60)
171 153
172/** 154/**
173 * iwl_good_plcp_health - checks for plcp error. 155 * iwl4965_good_plcp_health - checks for plcp error.
174 * 156 *
175 * When the plcp error is exceeding the thresholds, reset the radio 157 * When the plcp error is exceeding the thresholds, reset the radio
176 * to improve the throughput. 158 * to improve the throughput.
177 */ 159 */
178bool iwl_good_plcp_health(struct iwl_priv *priv, 160bool iwl4965_good_plcp_health(struct iwl_priv *priv,
179 struct iwl_rx_packet *pkt) 161 struct iwl_rx_packet *pkt)
180{ 162{
181 bool rc = true; 163 bool rc = true;
@@ -205,27 +187,15 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
205 struct statistics_rx_phy *ofdm; 187 struct statistics_rx_phy *ofdm;
206 struct statistics_rx_ht_phy *ofdm_ht; 188 struct statistics_rx_ht_phy *ofdm_ht;
207 189
208 if (iwl_bt_statistics(priv)) { 190 ofdm = &pkt->u.stats.rx.ofdm;
209 ofdm = &pkt->u.stats_bt.rx.ofdm; 191 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
210 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht; 192 combined_plcp_delta =
211 combined_plcp_delta = 193 (le32_to_cpu(ofdm->plcp_err) -
212 (le32_to_cpu(ofdm->plcp_err) - 194 le32_to_cpu(priv->_4965.statistics.
213 le32_to_cpu(priv->_agn.statistics_bt. 195 rx.ofdm.plcp_err)) +
214 rx.ofdm.plcp_err)) + 196 (le32_to_cpu(ofdm_ht->plcp_err) -
215 (le32_to_cpu(ofdm_ht->plcp_err) - 197 le32_to_cpu(priv->_4965.statistics.
216 le32_to_cpu(priv->_agn.statistics_bt. 198 rx.ofdm_ht.plcp_err));
217 rx.ofdm_ht.plcp_err));
218 } else {
219 ofdm = &pkt->u.stats.rx.ofdm;
220 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
221 combined_plcp_delta =
222 (le32_to_cpu(ofdm->plcp_err) -
223 le32_to_cpu(priv->_agn.statistics.
224 rx.ofdm.plcp_err)) +
225 (le32_to_cpu(ofdm_ht->plcp_err) -
226 le32_to_cpu(priv->_agn.statistics.
227 rx.ofdm_ht.plcp_err));
228 }
229 199
230 if ((combined_plcp_delta > 0) && 200 if ((combined_plcp_delta > 0) &&
231 ((combined_plcp_delta * 100) / plcp_msec) > 201 ((combined_plcp_delta * 100) / plcp_msec) >
@@ -256,56 +226,32 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
256 return rc; 226 return rc;
257} 227}
258 228
259void iwl_rx_statistics(struct iwl_priv *priv, 229void iwl4965_rx_statistics(struct iwl_priv *priv,
260 struct iwl_rx_mem_buffer *rxb) 230 struct iwl_rx_mem_buffer *rxb)
261{ 231{
262 int change; 232 int change;
263 struct iwl_rx_packet *pkt = rxb_addr(rxb); 233 struct iwl_rx_packet *pkt = rxb_addr(rxb);
264 234
265 if (iwl_bt_statistics(priv)) { 235 IWL_DEBUG_RX(priv,
266 IWL_DEBUG_RX(priv, 236 "Statistics notification received (%d vs %d).\n",
267 "Statistics notification received (%d vs %d).\n", 237 (int)sizeof(struct iwl_notif_statistics),
268 (int)sizeof(struct iwl_bt_notif_statistics), 238 le32_to_cpu(pkt->len_n_flags) &
269 le32_to_cpu(pkt->len_n_flags) & 239 FH_RSCSR_FRAME_SIZE_MSK);
270 FH_RSCSR_FRAME_SIZE_MSK); 240
271 241 change = ((priv->_4965.statistics.general.common.temperature !=
272 change = ((priv->_agn.statistics_bt.general.common.temperature != 242 pkt->u.stats.general.common.temperature) ||
273 pkt->u.stats_bt.general.common.temperature) || 243 ((priv->_4965.statistics.flag &
274 ((priv->_agn.statistics_bt.flag & 244 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
275 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 245 (pkt->u.stats.flag &
276 (pkt->u.stats_bt.flag & 246 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
277 STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 247#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
278#ifdef CONFIG_IWLWIFI_DEBUGFS 248 iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
279 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
280#endif 249#endif
281 250
282 } else { 251 iwl_legacy_recover_from_statistics(priv, pkt);
283 IWL_DEBUG_RX(priv,
284 "Statistics notification received (%d vs %d).\n",
285 (int)sizeof(struct iwl_notif_statistics),
286 le32_to_cpu(pkt->len_n_flags) &
287 FH_RSCSR_FRAME_SIZE_MSK);
288 252
289 change = ((priv->_agn.statistics.general.common.temperature != 253 memcpy(&priv->_4965.statistics, &pkt->u.stats,
290 pkt->u.stats.general.common.temperature) || 254 sizeof(priv->_4965.statistics));
291 ((priv->_agn.statistics.flag &
292 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
293 (pkt->u.stats.flag &
294 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
295#ifdef CONFIG_IWLWIFI_DEBUGFS
296 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
297#endif
298
299 }
300
301 iwl_recover_from_statistics(priv, pkt);
302
303 if (iwl_bt_statistics(priv))
304 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
305 sizeof(priv->_agn.statistics_bt));
306 else
307 memcpy(&priv->_agn.statistics, &pkt->u.stats,
308 sizeof(priv->_agn.statistics));
309 255
310 set_bit(STATUS_STATISTICS, &priv->status); 256 set_bit(STATUS_STATISTICS, &priv->status);
311 257
@@ -318,34 +264,28 @@ void iwl_rx_statistics(struct iwl_priv *priv,
318 264
319 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 265 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
320 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { 266 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
321 iwl_rx_calc_noise(priv); 267 iwl4965_rx_calc_noise(priv);
322 queue_work(priv->workqueue, &priv->run_time_calib_work); 268 queue_work(priv->workqueue, &priv->run_time_calib_work);
323 } 269 }
324 if (priv->cfg->ops->lib->temp_ops.temperature && change) 270 if (priv->cfg->ops->lib->temp_ops.temperature && change)
325 priv->cfg->ops->lib->temp_ops.temperature(priv); 271 priv->cfg->ops->lib->temp_ops.temperature(priv);
326} 272}
327 273
328void iwl_reply_statistics(struct iwl_priv *priv, 274void iwl4965_reply_statistics(struct iwl_priv *priv,
329 struct iwl_rx_mem_buffer *rxb) 275 struct iwl_rx_mem_buffer *rxb)
330{ 276{
331 struct iwl_rx_packet *pkt = rxb_addr(rxb); 277 struct iwl_rx_packet *pkt = rxb_addr(rxb);
332 278
333 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { 279 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
334#ifdef CONFIG_IWLWIFI_DEBUGFS 280#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
335 memset(&priv->_agn.accum_statistics, 0, 281 memset(&priv->_4965.accum_statistics, 0,
336 sizeof(struct iwl_notif_statistics)); 282 sizeof(struct iwl_notif_statistics));
337 memset(&priv->_agn.delta_statistics, 0, 283 memset(&priv->_4965.delta_statistics, 0,
338 sizeof(struct iwl_notif_statistics)); 284 sizeof(struct iwl_notif_statistics));
339 memset(&priv->_agn.max_delta, 0, 285 memset(&priv->_4965.max_delta, 0,
340 sizeof(struct iwl_notif_statistics)); 286 sizeof(struct iwl_notif_statistics));
341 memset(&priv->_agn.accum_statistics_bt, 0,
342 sizeof(struct iwl_bt_notif_statistics));
343 memset(&priv->_agn.delta_statistics_bt, 0,
344 sizeof(struct iwl_bt_notif_statistics));
345 memset(&priv->_agn.max_delta_bt, 0,
346 sizeof(struct iwl_bt_notif_statistics));
347#endif 287#endif
348 IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); 288 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
349 } 289 }
350 iwl_rx_statistics(priv, rxb); 290 iwl4965_rx_statistics(priv, rxb);
351} 291}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644
index 000000000000..a262c23553d2
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
@@ -0,0 +1,721 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
37static struct iwl_link_quality_cmd *
38iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
39{
40 int i, r;
41 struct iwl_link_quality_cmd *link_cmd;
42 u32 rate_flags = 0;
43 __le32 rate_n_flags;
44
45 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
46 if (!link_cmd) {
47 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
48 return NULL;
49 }
50 /* Set up the rate scaling to start at selected rate, fall back
51 * all the way down to 1M in IEEE order, and then spin on 1M */
52 if (priv->band == IEEE80211_BAND_5GHZ)
53 r = IWL_RATE_6M_INDEX;
54 else
55 r = IWL_RATE_1M_INDEX;
56
57 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
58 rate_flags |= RATE_MCS_CCK_MSK;
59
60 rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
61 RATE_MCS_ANT_POS;
62 rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
63 rate_flags);
64 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
65 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
66
67 link_cmd->general_params.single_stream_ant_msk =
68 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
69
70 link_cmd->general_params.dual_stream_ant_msk =
71 priv->hw_params.valid_tx_ant &
72 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
73 if (!link_cmd->general_params.dual_stream_ant_msk) {
74 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
75 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
76 link_cmd->general_params.dual_stream_ant_msk =
77 priv->hw_params.valid_tx_ant;
78 }
79
80 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
81 link_cmd->agg_params.agg_time_limit =
82 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
83
84 link_cmd->sta_id = sta_id;
85
86 return link_cmd;
87}
88
89/*
90 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
91 *
92 * Function sleeps.
93 */
94int
95iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
96 const u8 *addr, u8 *sta_id_r)
97{
98 int ret;
99 u8 sta_id;
100 struct iwl_link_quality_cmd *link_cmd;
101 unsigned long flags;
102
103 if (sta_id_r)
104 *sta_id_r = IWL_INVALID_STATION;
105
106 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
107 if (ret) {
108 IWL_ERR(priv, "Unable to add station %pM\n", addr);
109 return ret;
110 }
111
112 if (sta_id_r)
113 *sta_id_r = sta_id;
114
115 spin_lock_irqsave(&priv->sta_lock, flags);
116 priv->stations[sta_id].used |= IWL_STA_LOCAL;
117 spin_unlock_irqrestore(&priv->sta_lock, flags);
118
119 /* Set up default rate scaling table in device's station table */
120 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
121 if (!link_cmd) {
122 IWL_ERR(priv,
123 "Unable to initialize rate scaling for station %pM.\n",
124 addr);
125 return -ENOMEM;
126 }
127
128 ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
129 if (ret)
130 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
131
132 spin_lock_irqsave(&priv->sta_lock, flags);
133 priv->stations[sta_id].lq = link_cmd;
134 spin_unlock_irqrestore(&priv->sta_lock, flags);
135
136 return 0;
137}
138
139static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
140 struct iwl_rxon_context *ctx,
141 bool send_if_empty)
142{
143 int i, not_empty = 0;
144 u8 buff[sizeof(struct iwl_wep_cmd) +
145 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
146 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
147 size_t cmd_size = sizeof(struct iwl_wep_cmd);
148 struct iwl_host_cmd cmd = {
149 .id = ctx->wep_key_cmd,
150 .data = wep_cmd,
151 .flags = CMD_SYNC,
152 };
153
154 might_sleep();
155
156 memset(wep_cmd, 0, cmd_size +
157 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
158
159 for (i = 0; i < WEP_KEYS_MAX ; i++) {
160 wep_cmd->key[i].key_index = i;
161 if (ctx->wep_keys[i].key_size) {
162 wep_cmd->key[i].key_offset = i;
163 not_empty = 1;
164 } else {
165 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
166 }
167
168 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
169 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
170 ctx->wep_keys[i].key_size);
171 }
172
173 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
174 wep_cmd->num_keys = WEP_KEYS_MAX;
175
176 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
177
178 cmd.len = cmd_size;
179
180 if (not_empty || send_if_empty)
181 return iwl_legacy_send_cmd(priv, &cmd);
182 else
183 return 0;
184}
185
186int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
187 struct iwl_rxon_context *ctx)
188{
189 lockdep_assert_held(&priv->mutex);
190
191 return iwl4965_static_wepkey_cmd(priv, ctx, false);
192}
193
194int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_key_conf *keyconf)
197{
198 int ret;
199
200 lockdep_assert_held(&priv->mutex);
201
202 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
203 keyconf->keyidx);
204
205 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
206 if (iwl_legacy_is_rfkill(priv)) {
207 IWL_DEBUG_WEP(priv,
208 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
209 /* but keys in device are clear anyway so return success */
210 return 0;
211 }
212 ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
213 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
214 keyconf->keyidx, ret);
215
216 return ret;
217}
218
219int iwl4965_set_default_wep_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf)
222{
223 int ret;
224
225 lockdep_assert_held(&priv->mutex);
226
227 if (keyconf->keylen != WEP_KEY_LEN_128 &&
228 keyconf->keylen != WEP_KEY_LEN_64) {
229 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
230 return -EINVAL;
231 }
232
233 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
234 keyconf->hw_key_idx = HW_KEY_DEFAULT;
235 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
236
237 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
238 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
239 keyconf->keylen);
240
241 ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
242 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
243 keyconf->keylen, keyconf->keyidx, ret);
244
245 return ret;
246}
247
248static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
249 struct iwl_rxon_context *ctx,
250 struct ieee80211_key_conf *keyconf,
251 u8 sta_id)
252{
253 unsigned long flags;
254 __le16 key_flags = 0;
255 struct iwl_legacy_addsta_cmd sta_cmd;
256
257 lockdep_assert_held(&priv->mutex);
258
259 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
260
261 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
262 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
263 key_flags &= ~STA_KEY_FLG_INVALID;
264
265 if (keyconf->keylen == WEP_KEY_LEN_128)
266 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
267
268 if (sta_id == ctx->bcast_sta_id)
269 key_flags |= STA_KEY_MULTICAST_MSK;
270
271 spin_lock_irqsave(&priv->sta_lock, flags);
272
273 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
274 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
275 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
276
277 memcpy(priv->stations[sta_id].keyinfo.key,
278 keyconf->key, keyconf->keylen);
279
280 memcpy(&priv->stations[sta_id].sta.key.key[3],
281 keyconf->key, keyconf->keylen);
282
283 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
284 == STA_KEY_FLG_NO_ENC)
285 priv->stations[sta_id].sta.key.key_offset =
286 iwl_legacy_get_free_ucode_key_index(priv);
287 /* else, we are overriding an existing key => no need to allocated room
288 * in uCode. */
289
290 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
291 "no space for a new key");
292
293 priv->stations[sta_id].sta.key.key_flags = key_flags;
294 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
295 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
296
297 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
298 sizeof(struct iwl_legacy_addsta_cmd));
299 spin_unlock_irqrestore(&priv->sta_lock, flags);
300
301 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
302}
303
304static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_key_conf *keyconf,
307 u8 sta_id)
308{
309 unsigned long flags;
310 __le16 key_flags = 0;
311 struct iwl_legacy_addsta_cmd sta_cmd;
312
313 lockdep_assert_held(&priv->mutex);
314
315 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
316 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
317 key_flags &= ~STA_KEY_FLG_INVALID;
318
319 if (sta_id == ctx->bcast_sta_id)
320 key_flags |= STA_KEY_MULTICAST_MSK;
321
322 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
323
324 spin_lock_irqsave(&priv->sta_lock, flags);
325 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
326 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
327
328 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
329 keyconf->keylen);
330
331 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
332 keyconf->keylen);
333
334 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
335 == STA_KEY_FLG_NO_ENC)
336 priv->stations[sta_id].sta.key.key_offset =
337 iwl_legacy_get_free_ucode_key_index(priv);
338 /* else, we are overriding an existing key => no need to allocated room
339 * in uCode. */
340
341 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
342 "no space for a new key");
343
344 priv->stations[sta_id].sta.key.key_flags = key_flags;
345 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
346 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
347
348 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
349 sizeof(struct iwl_legacy_addsta_cmd));
350 spin_unlock_irqrestore(&priv->sta_lock, flags);
351
352 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
353}
354
355static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx,
357 struct ieee80211_key_conf *keyconf,
358 u8 sta_id)
359{
360 unsigned long flags;
361 int ret = 0;
362 __le16 key_flags = 0;
363
364 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
365 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
366 key_flags &= ~STA_KEY_FLG_INVALID;
367
368 if (sta_id == ctx->bcast_sta_id)
369 key_flags |= STA_KEY_MULTICAST_MSK;
370
371 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
372 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
373
374 spin_lock_irqsave(&priv->sta_lock, flags);
375
376 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
377 priv->stations[sta_id].keyinfo.keylen = 16;
378
379 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
380 == STA_KEY_FLG_NO_ENC)
381 priv->stations[sta_id].sta.key.key_offset =
382 iwl_legacy_get_free_ucode_key_index(priv);
383 /* else, we are overriding an existing key => no need to allocated room
384 * in uCode. */
385
386 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
387 "no space for a new key");
388
389 priv->stations[sta_id].sta.key.key_flags = key_flags;
390
391
392 /* This copy is acutally not needed: we get the key with each TX */
393 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
394
395 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
396
397 spin_unlock_irqrestore(&priv->sta_lock, flags);
398
399 return ret;
400}
401
402void iwl4965_update_tkip_key(struct iwl_priv *priv,
403 struct iwl_rxon_context *ctx,
404 struct ieee80211_key_conf *keyconf,
405 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
406{
407 u8 sta_id;
408 unsigned long flags;
409 int i;
410
411 if (iwl_legacy_scan_cancel(priv)) {
412 /* cancel scan failed, just live w/ bad key and rely
413 briefly on SW decryption */
414 return;
415 }
416
417 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
418 if (sta_id == IWL_INVALID_STATION)
419 return;
420
421 spin_lock_irqsave(&priv->sta_lock, flags);
422
423 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
424
425 for (i = 0; i < 5; i++)
426 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
427 cpu_to_le16(phase1key[i]);
428
429 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
430 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
431
432 iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
433
434 spin_unlock_irqrestore(&priv->sta_lock, flags);
435
436}
437
438int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
439 struct iwl_rxon_context *ctx,
440 struct ieee80211_key_conf *keyconf,
441 u8 sta_id)
442{
443 unsigned long flags;
444 u16 key_flags;
445 u8 keyidx;
446 struct iwl_legacy_addsta_cmd sta_cmd;
447
448 lockdep_assert_held(&priv->mutex);
449
450 ctx->key_mapping_keys--;
451
452 spin_lock_irqsave(&priv->sta_lock, flags);
453 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
454 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
455
456 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
457 keyconf->keyidx, sta_id);
458
459 if (keyconf->keyidx != keyidx) {
460 /* We need to remove a key with index different that the one
461 * in the uCode. This means that the key we need to remove has
462 * been replaced by another one with different index.
463 * Don't do anything and return ok
464 */
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
466 return 0;
467 }
468
469 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
470 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
471 keyconf->keyidx, key_flags);
472 spin_unlock_irqrestore(&priv->sta_lock, flags);
473 return 0;
474 }
475
476 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
477 &priv->ucode_key_table))
478 IWL_ERR(priv, "index %d not used in uCode key table.\n",
479 priv->stations[sta_id].sta.key.key_offset);
480 memset(&priv->stations[sta_id].keyinfo, 0,
481 sizeof(struct iwl_hw_key));
482 memset(&priv->stations[sta_id].sta.key, 0,
483 sizeof(struct iwl4965_keyinfo));
484 priv->stations[sta_id].sta.key.key_flags =
485 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
486 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
487 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
488 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
489
490 if (iwl_legacy_is_rfkill(priv)) {
491 IWL_DEBUG_WEP(priv,
492 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
493 spin_unlock_irqrestore(&priv->sta_lock, flags);
494 return 0;
495 }
496 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
497 sizeof(struct iwl_legacy_addsta_cmd));
498 spin_unlock_irqrestore(&priv->sta_lock, flags);
499
500 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
501}
502
503int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
504 struct ieee80211_key_conf *keyconf, u8 sta_id)
505{
506 int ret;
507
508 lockdep_assert_held(&priv->mutex);
509
510 ctx->key_mapping_keys++;
511 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
512
513 switch (keyconf->cipher) {
514 case WLAN_CIPHER_SUITE_CCMP:
515 ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
516 keyconf, sta_id);
517 break;
518 case WLAN_CIPHER_SUITE_TKIP:
519 ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
520 keyconf, sta_id);
521 break;
522 case WLAN_CIPHER_SUITE_WEP40:
523 case WLAN_CIPHER_SUITE_WEP104:
524 ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
525 keyconf, sta_id);
526 break;
527 default:
528 IWL_ERR(priv,
529 "Unknown alg: %s cipher = %x\n", __func__,
530 keyconf->cipher);
531 ret = -EINVAL;
532 }
533
534 IWL_DEBUG_WEP(priv,
535 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
536 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
537 sta_id, ret);
538
539 return ret;
540}
541
542/**
543 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
544 *
545 * This adds the broadcast station into the driver's station table
546 * and marks it driver active, so that it will be restored to the
547 * device at the next best time.
548 */
549int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
550 struct iwl_rxon_context *ctx)
551{
552 struct iwl_link_quality_cmd *link_cmd;
553 unsigned long flags;
554 u8 sta_id;
555
556 spin_lock_irqsave(&priv->sta_lock, flags);
557 sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
558 false, NULL);
559 if (sta_id == IWL_INVALID_STATION) {
560 IWL_ERR(priv, "Unable to prepare broadcast station\n");
561 spin_unlock_irqrestore(&priv->sta_lock, flags);
562
563 return -EINVAL;
564 }
565
566 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
567 priv->stations[sta_id].used |= IWL_STA_BCAST;
568 spin_unlock_irqrestore(&priv->sta_lock, flags);
569
570 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
571 if (!link_cmd) {
572 IWL_ERR(priv,
573 "Unable to initialize rate scaling for bcast station.\n");
574 return -ENOMEM;
575 }
576
577 spin_lock_irqsave(&priv->sta_lock, flags);
578 priv->stations[sta_id].lq = link_cmd;
579 spin_unlock_irqrestore(&priv->sta_lock, flags);
580
581 return 0;
582}
583
584/**
585 * iwl4965_update_bcast_station - update broadcast station's LQ command
586 *
587 * Only used by iwl4965. Placed here to have all bcast station management
588 * code together.
589 */
590static int iwl4965_update_bcast_station(struct iwl_priv *priv,
591 struct iwl_rxon_context *ctx)
592{
593 unsigned long flags;
594 struct iwl_link_quality_cmd *link_cmd;
595 u8 sta_id = ctx->bcast_sta_id;
596
597 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
598 if (!link_cmd) {
599 IWL_ERR(priv,
600 "Unable to initialize rate scaling for bcast station.\n");
601 return -ENOMEM;
602 }
603
604 spin_lock_irqsave(&priv->sta_lock, flags);
605 if (priv->stations[sta_id].lq)
606 kfree(priv->stations[sta_id].lq);
607 else
608 IWL_DEBUG_INFO(priv,
609 "Bcast station rate scaling has not been initialized yet.\n");
610 priv->stations[sta_id].lq = link_cmd;
611 spin_unlock_irqrestore(&priv->sta_lock, flags);
612
613 return 0;
614}
615
616int iwl4965_update_bcast_stations(struct iwl_priv *priv)
617{
618 struct iwl_rxon_context *ctx;
619 int ret = 0;
620
621 for_each_context(priv, ctx) {
622 ret = iwl4965_update_bcast_station(priv, ctx);
623 if (ret)
624 break;
625 }
626
627 return ret;
628}
629
630/**
631 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
632 */
633int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
634{
635 unsigned long flags;
636 struct iwl_legacy_addsta_cmd sta_cmd;
637
638 lockdep_assert_held(&priv->mutex);
639
640 /* Remove "disable" flag, to enable Tx for this TID */
641 spin_lock_irqsave(&priv->sta_lock, flags);
642 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
643 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
644 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
646 sizeof(struct iwl_legacy_addsta_cmd));
647 spin_unlock_irqrestore(&priv->sta_lock, flags);
648
649 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
650}
651
652int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
653 int tid, u16 ssn)
654{
655 unsigned long flags;
656 int sta_id;
657 struct iwl_legacy_addsta_cmd sta_cmd;
658
659 lockdep_assert_held(&priv->mutex);
660
661 sta_id = iwl_legacy_sta_id(sta);
662 if (sta_id == IWL_INVALID_STATION)
663 return -ENXIO;
664
665 spin_lock_irqsave(&priv->sta_lock, flags);
666 priv->stations[sta_id].sta.station_flags_msk = 0;
667 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
668 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
669 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
670 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
671 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
672 sizeof(struct iwl_legacy_addsta_cmd));
673 spin_unlock_irqrestore(&priv->sta_lock, flags);
674
675 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
676}
677
678int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
679 int tid)
680{
681 unsigned long flags;
682 int sta_id;
683 struct iwl_legacy_addsta_cmd sta_cmd;
684
685 lockdep_assert_held(&priv->mutex);
686
687 sta_id = iwl_legacy_sta_id(sta);
688 if (sta_id == IWL_INVALID_STATION) {
689 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
690 return -ENXIO;
691 }
692
693 spin_lock_irqsave(&priv->sta_lock, flags);
694 priv->stations[sta_id].sta.station_flags_msk = 0;
695 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
696 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
697 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
698 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
699 sizeof(struct iwl_legacy_addsta_cmd));
700 spin_unlock_irqrestore(&priv->sta_lock, flags);
701
702 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
703}
704
705void
706iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
707{
708 unsigned long flags;
709
710 spin_lock_irqsave(&priv->sta_lock, flags);
711 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
712 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
713 priv->stations[sta_id].sta.sta.modify_mask =
714 STA_MODIFY_SLEEP_TX_COUNT_MSK;
715 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
716 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
717 iwl_legacy_send_add_sta(priv,
718 &priv->stations[sta_id].sta, CMD_ASYNC);
719 spin_unlock_irqrestore(&priv->sta_lock, flags);
720
721}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644
index 000000000000..5c40502f869a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -0,0 +1,1369 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
48 * Mac80211 uses the following numbers, which we get as from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 IEEE80211_AC_BE,
71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
78};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
98
99/*
100 * handle build REPLY_TX command notification.
101 */
102static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
103 struct sk_buff *skb,
104 struct iwl_tx_cmd *tx_cmd,
105 struct ieee80211_tx_info *info,
106 struct ieee80211_hdr *hdr,
107 u8 std_id)
108{
109 __le16 fc = hdr->frame_control;
110 __le32 tx_flags = tx_cmd->tx_flags;
111
112 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
113 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
114 tx_flags |= TX_CMD_FLG_ACK_MSK;
115 if (ieee80211_is_mgmt(fc))
116 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
117 if (ieee80211_is_probe_resp(fc) &&
118 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
119 tx_flags |= TX_CMD_FLG_TSF_MSK;
120 } else {
121 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
122 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
123 }
124
125 if (ieee80211_is_back_req(fc))
126 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
127
128 tx_cmd->sta_id = std_id;
129 if (ieee80211_has_morefrags(fc))
130 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
131
132 if (ieee80211_is_data_qos(fc)) {
133 u8 *qc = ieee80211_get_qos_ctl(hdr);
134 tx_cmd->tid_tspec = qc[0] & 0xf;
135 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
136 } else {
137 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
138 }
139
140 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
141
142 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
143 if (ieee80211_is_mgmt(fc)) {
144 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
145 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
146 else
147 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
148 } else {
149 tx_cmd->timeout.pm_frame_timeout = 0;
150 }
151
152 tx_cmd->driver_txop = 0;
153 tx_cmd->tx_flags = tx_flags;
154 tx_cmd->next_frame_len = 0;
155}
156
157#define RTS_DFAULT_RETRY_LIMIT 60
158
159static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
160 struct iwl_tx_cmd *tx_cmd,
161 struct ieee80211_tx_info *info,
162 __le16 fc)
163{
164 u32 rate_flags;
165 int rate_idx;
166 u8 rts_retry_limit;
167 u8 data_retry_limit;
168 u8 rate_plcp;
169
170 /* Set retry limit on DATA packets and Probe Responses*/
171 if (ieee80211_is_probe_resp(fc))
172 data_retry_limit = 3;
173 else
174 data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
175 tx_cmd->data_retry_limit = data_retry_limit;
176
177 /* Set retry limit on RTS packets */
178 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
179 if (data_retry_limit < rts_retry_limit)
180 rts_retry_limit = data_retry_limit;
181 tx_cmd->rts_retry_limit = rts_retry_limit;
182
183 /* DATA packets will use the uCode station table for rate/antenna
184 * selection */
185 if (ieee80211_is_data(fc)) {
186 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
188 return;
189 }
190
191 /**
192 * If the current TX rate stored in mac80211 has the MCS bit set, it's
193 * not really a TX rate. Thus, we use the lowest supported rate for
194 * this band. Also use the lowest supported rate if the stored rate
195 * index is invalid.
196 */
197 rate_idx = info->control.rates[0].idx;
198 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
199 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
200 rate_idx = rate_lowest_index(&priv->bands[info->band],
201 info->control.sta);
202 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
203 if (info->band == IEEE80211_BAND_5GHZ)
204 rate_idx += IWL_FIRST_OFDM_RATE;
205 /* Get PLCP rate for tx_cmd->rate_n_flags */
206 rate_plcp = iwlegacy_rates[rate_idx].plcp;
207 /* Zero out flags for this packet */
208 rate_flags = 0;
209
210 /* Set CCK flag as needed */
211 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
212 rate_flags |= RATE_MCS_CCK_MSK;
213
214 /* Set up antennas */
215 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
216 priv->hw_params.valid_tx_ant);
217
218 rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
219
220 /* Set the rate in the TX cmd */
221 tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
222}
223
224static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
225 struct ieee80211_tx_info *info,
226 struct iwl_tx_cmd *tx_cmd,
227 struct sk_buff *skb_frag,
228 int sta_id)
229{
230 struct ieee80211_key_conf *keyconf = info->control.hw_key;
231
232 switch (keyconf->cipher) {
233 case WLAN_CIPHER_SUITE_CCMP:
234 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
235 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
238 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
239 break;
240
241 case WLAN_CIPHER_SUITE_TKIP:
242 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
243 ieee80211_get_tkip_key(keyconf, skb_frag,
244 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
245 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
246 break;
247
248 case WLAN_CIPHER_SUITE_WEP104:
249 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
250 /* fall through */
251 case WLAN_CIPHER_SUITE_WEP40:
252 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
253 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
254
255 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
256
257 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
258 "with key %d\n", keyconf->keyidx);
259 break;
260
261 default:
262 IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
263 break;
264 }
265}
266
/*
 * iwl4965_tx_skb - start REPLY_TX command process
 *
 * Main TX entry point: maps a frame handed down by mac80211 onto a
 * hardware Tx queue.  Builds the TX command (MAC header + metadata),
 * DMA-maps both the command and the frame payload, attaches them to a
 * TFD, and advances the queue write pointer.
 *
 * Returns 0 when the frame was queued; -1 when it was dropped (rfkill,
 * unknown station, queue full) — the skb is NOT consumed on failure.
 *
 * Runs with priv->lock held (irqsave) for almost its entire length;
 * priv->sta_lock is nested inside for the sequence-number/agg state.
 */
int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

	/* default to the BSS context unless the vif says otherwise */
	if (info->control.vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* Find index into station table for destination station */
	sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&priv->sta_lock);
			goto drop_unlock;
		}
		/* driver assigns the 802.11 sequence number for QoS data */
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;

	if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
		spin_unlock(&priv->sta_lock);
		goto drop_unlock;
	}

	/* only commit the advanced sequence number once we know the
	 * frame will actually be queued */
	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->sta_lock);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);

	iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_legacy_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, firstlen, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		/* fragmented frame: defer the write-pointer update until
		 * the last fragment has been queued */
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   secondlen, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, secondlen,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    firstlen, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						le16_to_cpu(tx_cmd->len));

	/* give the buffer back to the device after the scratch update */
	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       firstlen, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_legacy_dev_tx(priv,
			&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			sizeof(struct iwl_tfd),
			&out_cmd->hdr, firstlen,
			skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	/* queue is getting full: stop it so mac80211 backs off */
	if ((iwl_legacy_queue_space(q) < q->high_mark) &&
	    priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_legacy_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_legacy_stop_queue(priv, txq);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
557
558static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
559 struct iwl_dma_ptr *ptr, size_t size)
560{
561 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
562 GFP_KERNEL);
563 if (!ptr->addr)
564 return -ENOMEM;
565 ptr->size = size;
566 return 0;
567}
568
569static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
570 struct iwl_dma_ptr *ptr)
571{
572 if (unlikely(!ptr->addr))
573 return;
574
575 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
576 memset(ptr, 0, sizeof(*ptr));
577}
578
/**
 * iwl4965_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures: per-queue buffers, the
 * keep-warm buffer, the scheduler byte-count tables, and finally the
 * txq array itself.  Safe to call when priv->txq was never allocated.
 */
void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues (the command queue needs its own free routine) */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
			if (txq_id == priv->cmd_queue)
				iwl_legacy_cmd_queue_free(priv);
			else
				iwl_legacy_tx_queue_free(priv, txq_id);
	}
	/* Keep-warm buffer */
	iwl4965_free_dma_ptr(priv, &priv->kw);

	/* Scheduler byte-count tables */
	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_legacy_txq_mem(priv);
}
603
/**
 * iwl4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * Allocates the scheduler byte-count tables, the keep-warm buffer and
 * the txq array, then initializes every Tx queue (data + command).
 * On any failure all partially-allocated resources are unwound via the
 * labelled error path (iwl4965_free_dma_ptr tolerates double frees
 * because it clears the descriptor).
 *
 * @param priv
 * @return error code
 */
int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl4965_hw_txq_ctx_free(priv);

	ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_legacy_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl4965_txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_legacy_tx_queue_init(priv,
					&priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl4965_hw_txq_ctx_free(priv);
	iwl4965_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}
671
672void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
673{
674 int txq_id, slots_num;
675 unsigned long flags;
676
677 spin_lock_irqsave(&priv->lock, flags);
678
679 /* Turn off all Tx DMA fifos */
680 iwl4965_txq_set_sched(priv, 0);
681
682 /* Tell NIC where to find the "keep warm" buffer */
683 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
684
685 spin_unlock_irqrestore(&priv->lock, flags);
686
687 /* Alloc and init all Tx queues, including the command queue (#4) */
688 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
689 slots_num = txq_id == priv->cmd_queue ?
690 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
691 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
692 slots_num, txq_id);
693 }
694}
695
/**
 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
 *
 * Disables the scheduler, halts every Tx DMA channel and waits for it
 * to report idle, then unmaps/frees the skbs still sitting in the
 * queues.  Queue memory itself is not freed here.
 */
void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwl4965_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		/* poll timeout is 1000 — presumably usec; see
		 * iwl_poll_direct_bit() for the unit — TODO confirm */
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_legacy_read_direct32(priv,
					FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq)
		return;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == priv->cmd_queue)
			iwl_legacy_cmd_queue_unmap(priv);
		else
			iwl_legacy_tx_queue_unmap(priv, txq_id);
}
733
734/*
735 * Find first available (lowest unused) Tx Queue, mark it "active".
736 * Called only when finding queue for aggregation.
737 * Should never return anything < 7, because they should already
738 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
739 */
740static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
741{
742 int txq_id;
743
744 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
745 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
746 return txq_id;
747 return -1;
748}
749
/**
 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 *
 * Clears the queue's ACTIVE bit in the scheduler status register while
 * leaving all other queue configuration untouched.
 */
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
					    u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_legacy_write_prph(priv,
		IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
763
764/**
765 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
766 */
767static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
768 u16 txq_id)
769{
770 u32 tbl_dw_addr;
771 u32 tbl_dw;
772 u16 scd_q2ratid;
773
774 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
775
776 tbl_dw_addr = priv->scd_base_addr +
777 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
778
779 tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
780
781 if (txq_id & 0x1)
782 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
783 else
784 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
785
786 iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
787
788 return 0;
789}
790
/**
 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
 *
 * Validates the queue id, enables the <sta,tid> pair in the device's
 * station table, then programs the scheduler (RA/TID map, chain mode,
 * read/write pointers, window size, frame limit) and activates the
 * queue.  Returns 0 on success, -EINVAL for an out-of-range queue, or
 * the error from iwl4965_sta_tx_modify_enable_tid().
 *
 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 */
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
				  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_legacy_write_targ_mem(priv,
		priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
		& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	/* enable scheduler interrupts for this queue */
	iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
859
860
/*
 * iwl4965_tx_agg_start - mac80211 callback to start a TX BA session
 *
 * Picks a free aggregation queue, records the starting sequence number
 * in *ssn, and enables aggregation hardware for the <sta,tid> pair.
 * If frames are still pending in the HW queue the state machine parks
 * in IWL_EMPTYING_HW_QUEUE_ADDBA and the ADDBA callback is deferred to
 * iwl4965_txq_check_empty().  Returns 0 or a negative errno.
 */
int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* claim an unused aggregation queue (bit atomically set) */
	txq_id = iwl4965_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_legacy_set_swq_id(&priv->txq[txq_id],
				iwl4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		/* nothing queued: session can start right away */
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		/* drain pending frames first; check_empty() finishes ADDBA */
		IWL_DEBUG_HT(priv,
			"HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
925
/**
 * iwl4965_txq_agg_disable - tear down the scheduler state for an agg queue
 *
 * Stops the queue's scheduler, removes it from chain mode, resets the
 * read/write pointers to @ssn_idx, masks its interrupt and deactivates
 * it.  Returns -EINVAL if the queue is outside the aggregation range.
 *
 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
 * priv->lock must be held by the caller
 */
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				   u16 ssn_idx, u8 tx_fifo)
{
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}
961
/*
 * iwl4965_tx_agg_stop - mac80211 callback to stop a TX BA session
 *
 * If the HW queue still holds frames, only marks the state
 * IWL_EMPTYING_HW_QUEUE_DELBA and lets iwl4965_txq_check_empty()
 * finish the teardown later; otherwise disables the aggregation queue
 * immediately and notifies mac80211.  Returns 0 or a negative errno.
 */
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_legacy_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		/* defer real teardown to iwl4965_txq_check_empty() */
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs: irqs stay disabled across the
	 * handoff from sta_lock to priv->lock; the same 'flags' value
	 * is restored at the final unlock */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
1037
/*
 * iwl4965_txq_check_empty - advance the agg state machine on queue drain
 *
 * Called (with priv->sta_lock held) after frames are reclaimed.  When a
 * <sta,tid> is parked in an EMPTYING state and its HW queue has drained,
 * completes the deferred DELBA (disable queue, notify mac80211) or the
 * deferred ADDBA (mark IWL_AGG_ON, notify mac80211).  Always returns 0.
 */
int iwl4965_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id  == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue DELBA flow\n");
			iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
1078
1079static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1080 struct iwl_rxon_context *ctx,
1081 const u8 *addr1)
1082{
1083 struct ieee80211_sta *sta;
1084 struct iwl_station_priv *sta_priv;
1085
1086 rcu_read_lock();
1087 sta = ieee80211_find_sta(ctx->vif, addr1);
1088 if (sta) {
1089 sta_priv = (void *)sta->drv_priv;
1090 /* avoid atomic ops if this isn't a client */
1091 if (sta_priv->client &&
1092 atomic_dec_return(&sta_priv->pending_frames) == 0)
1093 ieee80211_sta_block_awake(priv->hw, sta, false);
1094 }
1095 rcu_read_unlock();
1096}
1097
1098static void
1099iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1100 bool is_agg)
1101{
1102 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1103
1104 if (!is_agg)
1105 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1106
1107 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1108}
1109
/*
 * iwl4965_tx_queue_reclaim - free TFDs up to (not including) @index
 *
 * Walks the queue's circular buffer from read_ptr to @index, reporting
 * each frame's status to mac80211 and freeing its TFD.  Returns the
 * number of reclaimed QoS-data frames (used by the caller to adjust
 * tfds_in_queue); non-QoS frames are freed but not counted.
 */
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		/* queues >= FIRST_AMPDU_QUEUE carry aggregated traffic */
		iwl4965_tx_status(priv, tx_info,
				 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb = NULL;

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
1142
/**
 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 *
 * Returns 0 on success; -EINVAL when no BA was expected; -1 when the
 * aggregation window does not fit in the 64-bit bitmap after aligning.
 */
static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
							ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i,
			(agg->start_idx + i) & 0xff,
			agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
				   (unsigned long long)bitmap);

	/* status of the whole A-MPDU is reported on its first frame only */
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	return 0;
}
1216
1217/**
1218 * translate ucode response to mac80211 tx status control values
1219 */
1220void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1221 struct ieee80211_tx_info *info)
1222{
1223 struct ieee80211_tx_rate *r = &info->control.rates[0];
1224
1225 info->antenna_sel_tx =
1226 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1227 if (rate_n_flags & RATE_MCS_HT_MSK)
1228 r->flags |= IEEE80211_TX_RC_MCS;
1229 if (rate_n_flags & RATE_MCS_GF_MSK)
1230 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1231 if (rate_n_flags & RATE_MCS_HT40_MSK)
1232 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1233 if (rate_n_flags & RATE_MCS_DUP_MSK)
1234 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1235 if (rate_n_flags & RATE_MCS_SGI_MSK)
1236 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1237 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1238}
1239
/**
 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 *
 * Updates per-frame ACK status, reclaims all TFDs in front of the BA
 * window, wakes the software queue when space frees up, and nudges the
 * aggregation state machine.  Runs under priv->sta_lock.
 */
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
					   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
			"scd_flow = "
			"%d, scd_ssn = %d\n",
			ba_resp->tid,
			ba_resp->seq_ctl,
			(unsigned long long)le64_to_cpu(ba_resp->bitmap),
			ba_resp->scd_flow,
			ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
		iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_legacy_wake_queue(priv, txq);

		iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
1331
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * iwl4965_get_tx_fail_reason - map a uCode TX status word to a name
 *
 * Debug-only helper: returns a static string naming the failure or
 * postponement reason encoded in the low status bits, "SUCCESS" for a
 * successful transmission, or "UNKNOWN" for unrecognized codes.
 */
const char *iwl4965_get_tx_fail_reason(u32 status)
{
	/* expand to "case TX_STATUS_FAIL_x: return "x";" etc. */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644
index 000000000000..001d148feb94
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
@@ -0,0 +1,166 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
/**
 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 *
 * Returns 0 when every sampled word matches @image, -EIO as soon as
 * three mismatches have been seen (the image is clearly not loaded).
 */
static int
iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWL4965_RTC_INST_LOWER_BOUND);
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			errcnt++;
			/* a few bad samples is enough to declare failure */
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
77
78/**
79 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
80 * looking at all data.
81 */
82static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
83 u32 len)
84{
85 u32 val;
86 u32 save_len = len;
87 int ret = 0;
88 u32 errcnt;
89
90 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
91
92 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
93 IWL4965_RTC_INST_LOWER_BOUND);
94
95 errcnt = 0;
96 for (; len > 0; len -= sizeof(u32), image++) {
97 /* read data comes through single port, auto-incr addr */
98 /* NOTE: Use the debugless read so we don't flood kernel log
99 * if IWL_DL_IO is set */
100 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
101 if (val != le32_to_cpu(*image)) {
102 IWL_ERR(priv, "uCode INST section is invalid at "
103 "offset 0x%x, is 0x%x, s/b 0x%x\n",
104 save_len - len, val, le32_to_cpu(*image));
105 ret = -EIO;
106 errcnt++;
107 if (errcnt >= 20)
108 break;
109 }
110 }
111
112 if (!errcnt)
113 IWL_DEBUG_INFO(priv,
114 "ucode image in INSTRUCTION memory is good\n");
115
116 return ret;
117}
118
119/**
120 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
121 * and verify its contents
122 */
123int iwl4965_verify_ucode(struct iwl_priv *priv)
124{
125 __le32 *image;
126 u32 len;
127 int ret;
128
129 /* Try bootstrap */
130 image = (__le32 *)priv->ucode_boot.v_addr;
131 len = priv->ucode_boot.len;
132 ret = iwl4965_verify_inst_sparse(priv, image, len);
133 if (!ret) {
134 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
135 return 0;
136 }
137
138 /* Try initialize */
139 image = (__le32 *)priv->ucode_init.v_addr;
140 len = priv->ucode_init.len;
141 ret = iwl4965_verify_inst_sparse(priv, image, len);
142 if (!ret) {
143 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
144 return 0;
145 }
146
147 /* Try runtime/protocol */
148 image = (__le32 *)priv->ucode_code.v_addr;
149 len = priv->ucode_code.len;
150 ret = iwl4965_verify_inst_sparse(priv, image, len);
151 if (!ret) {
152 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
153 return 0;
154 }
155
156 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
157
158 /* Since nothing seems to match, show first several data entries in
159 * instruction SRAM, so maybe visual inspection will give a clue.
160 * Selection of bootstrap image (vs. other images) is arbitrary. */
161 image = (__le32 *)priv->ucode_boot.v_addr;
162 len = priv->ucode_boot.len;
163 ret = iwl4965_verify_inst_full(priv, image, len);
164
165 return ret;
166}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index 8998ed134d1a..f5433c74b845 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -43,12 +43,11 @@
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-agn-calib.h" 46#include "iwl-4965-calib.h"
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h" 48#include "iwl-4965-led.h"
49#include "iwl-agn.h" 49#include "iwl-4965.h"
50#include "iwl-agn-debugfs.h" 50#include "iwl-4965-debugfs.h"
51#include "iwl-legacy.h"
52 51
53static int iwl4965_send_tx_power(struct iwl_priv *priv); 52static int iwl4965_send_tx_power(struct iwl_priv *priv);
54static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 53static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -74,11 +73,11 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
74 IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); 73 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
75 74
76 /* verify BSM SRAM contents */ 75 /* verify BSM SRAM contents */
77 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); 76 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
78 for (reg = BSM_SRAM_LOWER_BOUND; 77 for (reg = BSM_SRAM_LOWER_BOUND;
79 reg < BSM_SRAM_LOWER_BOUND + len; 78 reg < BSM_SRAM_LOWER_BOUND + len;
80 reg += sizeof(u32), image++) { 79 reg += sizeof(u32), image++) {
81 val = iwl_read_prph(priv, reg); 80 val = iwl_legacy_read_prph(priv, reg);
82 if (val != le32_to_cpu(*image)) { 81 if (val != le32_to_cpu(*image)) {
83 IWL_ERR(priv, "BSM uCode verification failed at " 82 IWL_ERR(priv, "BSM uCode verification failed at "
84 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", 83 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -158,33 +157,34 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
158 inst_len = priv->ucode_init.len; 157 inst_len = priv->ucode_init.len;
159 data_len = priv->ucode_init_data.len; 158 data_len = priv->ucode_init_data.len;
160 159
161 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 160 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
162 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 161 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
163 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); 162 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
164 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); 163 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
165 164
166 /* Fill BSM memory with bootstrap instructions */ 165 /* Fill BSM memory with bootstrap instructions */
167 for (reg_offset = BSM_SRAM_LOWER_BOUND; 166 for (reg_offset = BSM_SRAM_LOWER_BOUND;
168 reg_offset < BSM_SRAM_LOWER_BOUND + len; 167 reg_offset < BSM_SRAM_LOWER_BOUND + len;
169 reg_offset += sizeof(u32), image++) 168 reg_offset += sizeof(u32), image++)
170 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image)); 169 _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
171 170
172 ret = iwl4965_verify_bsm(priv); 171 ret = iwl4965_verify_bsm(priv);
173 if (ret) 172 if (ret)
174 return ret; 173 return ret;
175 174
176 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ 175 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
177 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); 176 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
178 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); 177 iwl_legacy_write_prph(priv,
179 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); 178 BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
179 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
180 180
181 /* Load bootstrap code into instruction SRAM now, 181 /* Load bootstrap code into instruction SRAM now,
182 * to prepare to load "initialize" uCode */ 182 * to prepare to load "initialize" uCode */
183 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); 183 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
184 184
185 /* Wait for load of bootstrap uCode to finish */ 185 /* Wait for load of bootstrap uCode to finish */
186 for (i = 0; i < 100; i++) { 186 for (i = 0; i < 100; i++) {
187 done = iwl_read_prph(priv, BSM_WR_CTRL_REG); 187 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
188 if (!(done & BSM_WR_CTRL_REG_BIT_START)) 188 if (!(done & BSM_WR_CTRL_REG_BIT_START))
189 break; 189 break;
190 udelay(10); 190 udelay(10);
@@ -198,7 +198,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
198 198
199 /* Enable future boot loads whenever power management unit triggers it 199 /* Enable future boot loads whenever power management unit triggers it
200 * (e.g. when powering back up after power-save shutdown) */ 200 * (e.g. when powering back up after power-save shutdown) */
201 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); 201 iwl_legacy_write_prph(priv,
202 BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
202 203
203 204
204 return 0; 205 return 0;
@@ -224,14 +225,14 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
224 pdata = priv->ucode_data_backup.p_addr >> 4; 225 pdata = priv->ucode_data_backup.p_addr >> 4;
225 226
226 /* Tell bootstrap uCode where to find image to load */ 227 /* Tell bootstrap uCode where to find image to load */
227 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 228 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
228 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 229 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
229 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 230 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
230 priv->ucode_data.len); 231 priv->ucode_data.len);
231 232
232 /* Inst byte count must be last to set up, bit 31 signals uCode 233 /* Inst byte count must be last to set up, bit 31 signals uCode
233 * that all new ptr/size info is in place */ 234 * that all new ptr/size info is in place */
234 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 235 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
235 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 236 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
236 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); 237 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
237 238
@@ -254,7 +255,7 @@ static void iwl4965_init_alive_start(struct iwl_priv *priv)
254 /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 255 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
255 * This is a paranoid check, because we would not have gotten the 256 * This is a paranoid check, because we would not have gotten the
256 * "initialize" alive if code weren't properly loaded. */ 257 * "initialize" alive if code weren't properly loaded. */
257 if (iwl_verify_ucode(priv)) { 258 if (iwl4965_verify_ucode(priv)) {
258 /* Runtime instruction load was bad; 259 /* Runtime instruction load was bad;
259 * take it all the way back down so we can try again */ 260 * take it all the way back down so we can try again */
260 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); 261 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
@@ -280,7 +281,7 @@ restart:
280 queue_work(priv->workqueue, &priv->restart); 281 queue_work(priv->workqueue, &priv->restart);
281} 282}
282 283
283static bool is_ht40_channel(__le32 rxon_flags) 284static bool iw4965_is_ht40_channel(__le32 rxon_flags)
284{ 285{
285 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) 286 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
286 >> RXON_FLG_CHANNEL_MODE_POS; 287 >> RXON_FLG_CHANNEL_MODE_POS;
@@ -288,23 +289,6 @@ static bool is_ht40_channel(__le32 rxon_flags)
288 (chan_mod == CHANNEL_MODE_MIXED)); 289 (chan_mod == CHANNEL_MODE_MIXED));
289} 290}
290 291
291/*
292 * EEPROM handlers
293 */
294static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
295{
296 return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
297}
298
299/*
300 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
301 * must be called under priv->lock and mac access
302 */
303static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
304{
305 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
306}
307
308static void iwl4965_nic_config(struct iwl_priv *priv) 292static void iwl4965_nic_config(struct iwl_priv *priv)
309{ 293{
310 unsigned long flags; 294 unsigned long flags;
@@ -312,22 +296,23 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
312 296
313 spin_lock_irqsave(&priv->lock, flags); 297 spin_lock_irqsave(&priv->lock, flags);
314 298
315 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 299 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
316 300
317 /* write radio config values to register */ 301 /* write radio config values to register */
318 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) 302 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
319 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 303 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
320 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 304 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
321 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 305 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
322 EEPROM_RF_CFG_DASH_MSK(radio_cfg)); 306 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
323 307
324 /* set CSR_HW_CONFIG_REG for uCode use */ 308 /* set CSR_HW_CONFIG_REG for uCode use */
325 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 309 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
326 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 310 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
327 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 311 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
328 312
329 priv->calib_info = (struct iwl_eeprom_calib_info *) 313 priv->calib_info = (struct iwl_eeprom_calib_info *)
330 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET); 314 iwl_legacy_eeprom_query_addr(priv,
315 EEPROM_4965_CALIB_TXPOWER_OFFSET);
331 316
332 spin_unlock_irqrestore(&priv->lock, flags); 317 spin_unlock_irqrestore(&priv->lock, flags);
333} 318}
@@ -340,7 +325,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
340 struct iwl_chain_noise_data *data = &(priv->chain_noise_data); 325 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
341 326
342 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 327 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
343 iwl_is_any_associated(priv)) { 328 iwl_legacy_is_any_associated(priv)) {
344 struct iwl_calib_diff_gain_cmd cmd; 329 struct iwl_calib_diff_gain_cmd cmd;
345 330
346 /* clear data for chain noise calibration algorithm */ 331 /* clear data for chain noise calibration algorithm */
@@ -357,7 +342,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
357 cmd.diff_gain_a = 0; 342 cmd.diff_gain_a = 0;
358 cmd.diff_gain_b = 0; 343 cmd.diff_gain_b = 0;
359 cmd.diff_gain_c = 0; 344 cmd.diff_gain_c = 0;
360 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 345 if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
361 sizeof(cmd), &cmd)) 346 sizeof(cmd), &cmd))
362 IWL_ERR(priv, 347 IWL_ERR(priv,
363 "Could not send REPLY_PHY_CALIBRATION_CMD\n"); 348 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
@@ -366,237 +351,6 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
366 } 351 }
367} 352}
368 353
369static void iwl4965_gain_computation(struct iwl_priv *priv,
370 u32 *average_noise,
371 u16 min_average_noise_antenna_i,
372 u32 min_average_noise,
373 u8 default_chain)
374{
375 int i, ret;
376 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
377
378 data->delta_gain_code[min_average_noise_antenna_i] = 0;
379
380 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
381 s32 delta_g = 0;
382
383 if (!(data->disconn_array[i]) &&
384 (data->delta_gain_code[i] ==
385 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
386 delta_g = average_noise[i] - min_average_noise;
387 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
388 data->delta_gain_code[i] =
389 min(data->delta_gain_code[i],
390 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
391
392 data->delta_gain_code[i] =
393 (data->delta_gain_code[i] | (1 << 2));
394 } else {
395 data->delta_gain_code[i] = 0;
396 }
397 }
398 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
399 data->delta_gain_code[0],
400 data->delta_gain_code[1],
401 data->delta_gain_code[2]);
402
403 /* Differential gain gets sent to uCode only once */
404 if (!data->radio_write) {
405 struct iwl_calib_diff_gain_cmd cmd;
406 data->radio_write = 1;
407
408 memset(&cmd, 0, sizeof(cmd));
409 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
410 cmd.diff_gain_a = data->delta_gain_code[0];
411 cmd.diff_gain_b = data->delta_gain_code[1];
412 cmd.diff_gain_c = data->delta_gain_code[2];
413 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
414 sizeof(cmd), &cmd);
415 if (ret)
416 IWL_DEBUG_CALIB(priv, "fail sending cmd "
417 "REPLY_PHY_CALIBRATION_CMD\n");
418
419 /* TODO we might want recalculate
420 * rx_chain in rxon cmd */
421
422 /* Mark so we run this algo only once! */
423 data->state = IWL_CHAIN_NOISE_CALIBRATED;
424 }
425}
426
427static void iwl4965_bg_txpower_work(struct work_struct *work)
428{
429 struct iwl_priv *priv = container_of(work, struct iwl_priv,
430 txpower_work);
431
432 /* If a scan happened to start before we got here
433 * then just return; the statistics notification will
434 * kick off another scheduled work to compensate for
435 * any temperature delta we missed here. */
436 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
437 test_bit(STATUS_SCANNING, &priv->status))
438 return;
439
440 mutex_lock(&priv->mutex);
441
442 /* Regardless of if we are associated, we must reconfigure the
443 * TX power since frames can be sent on non-radar channels while
444 * not associated */
445 iwl4965_send_tx_power(priv);
446
447 /* Update last_temperature to keep is_calib_needed from running
448 * when it isn't needed... */
449 priv->last_temperature = priv->temperature;
450
451 mutex_unlock(&priv->mutex);
452}
453
454/*
455 * Acquire priv->lock before calling this function !
456 */
457static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
458{
459 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
460 (index & 0xff) | (txq_id << 8));
461 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
462}
463
464/**
465 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
466 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
467 * @scd_retry: (1) Indicates queue will be used in aggregation mode
468 *
469 * NOTE: Acquire priv->lock before calling this function !
470 */
471static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
472 struct iwl_tx_queue *txq,
473 int tx_fifo_id, int scd_retry)
474{
475 int txq_id = txq->q.id;
476
477 /* Find out whether to activate Tx queue */
478 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
479
480 /* Set up and activate */
481 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
482 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
483 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
484 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
485 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
486 IWL49_SCD_QUEUE_STTS_REG_MSK);
487
488 txq->sched_retry = scd_retry;
489
490 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
491 active ? "Activate" : "Deactivate",
492 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
493}
494
495static const s8 default_queue_to_tx_fifo[] = {
496 IWL_TX_FIFO_VO,
497 IWL_TX_FIFO_VI,
498 IWL_TX_FIFO_BE,
499 IWL_TX_FIFO_BK,
500 IWL49_CMD_FIFO_NUM,
501 IWL_TX_FIFO_UNUSED,
502 IWL_TX_FIFO_UNUSED,
503};
504
505static int iwl4965_alive_notify(struct iwl_priv *priv)
506{
507 u32 a;
508 unsigned long flags;
509 int i, chan;
510 u32 reg_val;
511
512 spin_lock_irqsave(&priv->lock, flags);
513
514 /* Clear 4965's internal Tx Scheduler data base */
515 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
516 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
517 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
518 iwl_write_targ_mem(priv, a, 0);
519 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
520 iwl_write_targ_mem(priv, a, 0);
521 for (; a < priv->scd_base_addr +
522 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
523 iwl_write_targ_mem(priv, a, 0);
524
525 /* Tel 4965 where to find Tx byte count tables */
526 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
527 priv->scd_bc_tbls.dma >> 10);
528
529 /* Enable DMA channel */
530 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
531 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
532 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
533 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
534
535 /* Update FH chicken bits */
536 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
537 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
538 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
539
540 /* Disable chain mode for all queues */
541 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
542
543 /* Initialize each Tx queue (including the command queue) */
544 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
545
546 /* TFD circular buffer read/write indexes */
547 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
548 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
549
550 /* Max Tx Window size for Scheduler-ACK mode */
551 iwl_write_targ_mem(priv, priv->scd_base_addr +
552 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
553 (SCD_WIN_SIZE <<
554 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
555 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
556
557 /* Frame limit */
558 iwl_write_targ_mem(priv, priv->scd_base_addr +
559 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
560 sizeof(u32),
561 (SCD_FRAME_LIMIT <<
562 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
563 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
564
565 }
566 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
567 (1 << priv->hw_params.max_txq_num) - 1);
568
569 /* Activate all Tx DMA/FIFO channels */
570 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
571
572 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
573
574 /* make sure all queue are not stopped */
575 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
576 for (i = 0; i < 4; i++)
577 atomic_set(&priv->queue_stop_count[i], 0);
578
579 /* reset to 0 to enable all the queue first */
580 priv->txq_ctx_active_msk = 0;
581 /* Map each Tx/cmd queue to its corresponding fifo */
582 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
583
584 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
585 int ac = default_queue_to_tx_fifo[i];
586
587 iwl_txq_ctx_activate(priv, i);
588
589 if (ac == IWL_TX_FIFO_UNUSED)
590 continue;
591
592 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
593 }
594
595 spin_unlock_irqrestore(&priv->lock, flags);
596
597 return 0;
598}
599
600static struct iwl_sensitivity_ranges iwl4965_sensitivity = { 354static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
601 .min_nrg_cck = 97, 355 .min_nrg_cck = 97,
602 .max_nrg_cck = 0, /* not used, set to 0 */ 356 .max_nrg_cck = 0, /* not used, set to 0 */
@@ -658,15 +412,15 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
658 412
659 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 413 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
660 414
661 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 415 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
662 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 416 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
663 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 417 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
664 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 418 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
665 419
666 iwl4965_set_ct_threshold(priv); 420 iwl4965_set_ct_threshold(priv);
667 421
668 priv->hw_params.sens = &iwl4965_sensitivity; 422 priv->hw_params.sens = &iwl4965_sensitivity;
669 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 423 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
670 424
671 return 0; 425 return 0;
672} 426}
@@ -1150,9 +904,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1150 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, 904 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
1151 is_ht40); 905 is_ht40);
1152 906
1153 ch_info = iwl_get_channel_info(priv, priv->band, channel); 907 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
1154 908
1155 if (!is_channel_valid(ch_info)) 909 if (!iwl_legacy_is_channel_valid(ch_info))
1156 return -EINVAL; 910 return -EINVAL;
1157 911
1158 /* get txatten group, used to select 1) thermal txpower adjustment 912 /* get txatten group, used to select 1) thermal txpower adjustment
@@ -1376,7 +1130,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1376 1130
1377 band = priv->band == IEEE80211_BAND_2GHZ; 1131 band = priv->band == IEEE80211_BAND_2GHZ;
1378 1132
1379 is_ht40 = is_ht40_channel(ctx->active.flags); 1133 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1380 1134
1381 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1135 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1382 ctrl_chan_high = 1; 1136 ctrl_chan_high = 1;
@@ -1390,7 +1144,8 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1390 if (ret) 1144 if (ret)
1391 goto out; 1145 goto out;
1392 1146
1393 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); 1147 ret = iwl_legacy_send_cmd_pdu(priv,
1148 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1394 1149
1395out: 1150out:
1396 return ret; 1151 return ret;
@@ -1401,8 +1156,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1401{ 1156{
1402 int ret = 0; 1157 int ret = 0;
1403 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1158 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1404 const struct iwl_rxon_cmd *rxon1 = &ctx->staging; 1159 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1405 const struct iwl_rxon_cmd *rxon2 = &ctx->active; 1160 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1406 1161
1407 if ((rxon1->flags == rxon2->flags) && 1162 if ((rxon1->flags == rxon2->flags) &&
1408 (rxon1->filter_flags == rxon2->filter_flags) && 1163 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1428,7 +1183,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1428 ctx->staging.ofdm_ht_dual_stream_basic_rates; 1183 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1429 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; 1184 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1430 1185
1431 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, 1186 ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1432 sizeof(rxon_assoc), &rxon_assoc, NULL); 1187 sizeof(rxon_assoc), &rxon_assoc, NULL);
1433 if (ret) 1188 if (ret)
1434 return ret; 1189 return ret;
@@ -1439,12 +1194,12 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1439static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1194static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1440{ 1195{
1441 /* cast away the const for active_rxon in this function */ 1196 /* cast away the const for active_rxon in this function */
1442 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active; 1197 struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
1443 int ret; 1198 int ret;
1444 bool new_assoc = 1199 bool new_assoc =
1445 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); 1200 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1446 1201
1447 if (!iwl_is_alive(priv)) 1202 if (!iwl_legacy_is_alive(priv))
1448 return -EBUSY; 1203 return -EBUSY;
1449 1204
1450 if (!ctx->is_active) 1205 if (!ctx->is_active)
@@ -1453,7 +1208,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1453 /* always get timestamp with Rx frame */ 1208 /* always get timestamp with Rx frame */
1454 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 1209 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1455 1210
1456 ret = iwl_check_rxon_cmd(priv, ctx); 1211 ret = iwl_legacy_check_rxon_cmd(priv, ctx);
1457 if (ret) { 1212 if (ret) {
1458 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1213 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1459 return -EINVAL; 1214 return -EINVAL;
@@ -1467,21 +1222,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1467 (priv->switch_rxon.channel != ctx->staging.channel)) { 1222 (priv->switch_rxon.channel != ctx->staging.channel)) {
1468 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 1223 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1469 le16_to_cpu(priv->switch_rxon.channel)); 1224 le16_to_cpu(priv->switch_rxon.channel));
1470 iwl_chswitch_done(priv, false); 1225 iwl_legacy_chswitch_done(priv, false);
1471 } 1226 }
1472 1227
1473 /* If we don't need to send a full RXON, we can use 1228 /* If we don't need to send a full RXON, we can use
1474 * iwl_rxon_assoc_cmd which is used to reconfigure filter 1229 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1475 * and other flags for the current radio configuration. */ 1230 * and other flags for the current radio configuration. */
1476 if (!iwl_full_rxon_required(priv, ctx)) { 1231 if (!iwl_legacy_full_rxon_required(priv, ctx)) {
1477 ret = iwl_send_rxon_assoc(priv, ctx); 1232 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
1478 if (ret) { 1233 if (ret) {
1479 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); 1234 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1480 return ret; 1235 return ret;
1481 } 1236 }
1482 1237
1483 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1238 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1484 iwl_print_rx_config_cmd(priv, ctx); 1239 iwl_legacy_print_rx_config_cmd(priv, ctx);
1485 return 0; 1240 return 0;
1486 } 1241 }
1487 1242
@@ -1489,12 +1244,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1489 * an RXON_ASSOC and the new config wants the associated mask enabled, 1244 * an RXON_ASSOC and the new config wants the associated mask enabled,
1490 * we must clear the associated from the active configuration 1245 * we must clear the associated from the active configuration
1491 * before we apply the new config */ 1246 * before we apply the new config */
1492 if (iwl_is_associated_ctx(ctx) && new_assoc) { 1247 if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
1493 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1248 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1494 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1249 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1495 1250
1496 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1251 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1497 sizeof(struct iwl_rxon_cmd), 1252 sizeof(struct iwl_legacy_rxon_cmd),
1498 active_rxon); 1253 active_rxon);
1499 1254
1500 /* If the mask clearing failed then we set 1255 /* If the mask clearing failed then we set
@@ -1504,9 +1259,9 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1504 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); 1259 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1505 return ret; 1260 return ret;
1506 } 1261 }
1507 iwl_clear_ucode_stations(priv, ctx); 1262 iwl_legacy_clear_ucode_stations(priv, ctx);
1508 iwl_restore_stations(priv, ctx); 1263 iwl_legacy_restore_stations(priv, ctx);
1509 ret = iwl_restore_default_wep_keys(priv, ctx); 1264 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1510 if (ret) { 1265 if (ret) {
1511 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 1266 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1512 return ret; 1267 return ret;
@@ -1521,24 +1276,25 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1521 le16_to_cpu(ctx->staging.channel), 1276 le16_to_cpu(ctx->staging.channel),
1522 ctx->staging.bssid_addr); 1277 ctx->staging.bssid_addr);
1523 1278
1524 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto); 1279 iwl_legacy_set_rxon_hwcrypto(priv, ctx,
1280 !priv->cfg->mod_params->sw_crypto);
1525 1281
1526 /* Apply the new configuration 1282 /* Apply the new configuration
1527 * RXON unassoc clears the station table in uCode so restoration of 1283 * RXON unassoc clears the station table in uCode so restoration of
1528 * stations is needed after it (the RXON command) completes 1284 * stations is needed after it (the RXON command) completes
1529 */ 1285 */
1530 if (!new_assoc) { 1286 if (!new_assoc) {
1531 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1287 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1532 sizeof(struct iwl_rxon_cmd), &ctx->staging); 1288 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1533 if (ret) { 1289 if (ret) {
1534 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 1290 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1535 return ret; 1291 return ret;
1536 } 1292 }
1537 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); 1293 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1538 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1294 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1539 iwl_clear_ucode_stations(priv, ctx); 1295 iwl_legacy_clear_ucode_stations(priv, ctx);
1540 iwl_restore_stations(priv, ctx); 1296 iwl_legacy_restore_stations(priv, ctx);
1541 ret = iwl_restore_default_wep_keys(priv, ctx); 1297 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1542 if (ret) { 1298 if (ret) {
1543 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 1299 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1544 return ret; 1300 return ret;
@@ -1549,21 +1305,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1549 /* Apply the new configuration 1305 /* Apply the new configuration
1550 * RXON assoc doesn't clear the station table in uCode, 1306 * RXON assoc doesn't clear the station table in uCode,
1551 */ 1307 */
1552 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1308 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1553 sizeof(struct iwl_rxon_cmd), &ctx->staging); 1309 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1554 if (ret) { 1310 if (ret) {
1555 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 1311 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1556 return ret; 1312 return ret;
1557 } 1313 }
1558 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1314 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1559 } 1315 }
1560 iwl_print_rx_config_cmd(priv, ctx); 1316 iwl_legacy_print_rx_config_cmd(priv, ctx);
1561 1317
1562 iwl_init_sensitivity(priv); 1318 iwl4965_init_sensitivity(priv);
1563 1319
1564 /* If we issue a new RXON command which required a tune then we must 1320 /* If we issue a new RXON command which required a tune then we must
1565 * send a new TXPOWER command or we won't be able to Tx any frames */ 1321 * send a new TXPOWER command or we won't be able to Tx any frames */
1566 ret = iwl_set_tx_power(priv, priv->tx_power_next, true); 1322 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1567 if (ret) { 1323 if (ret) {
1568 IWL_ERR(priv, "Error sending TX power (%d)\n", ret); 1324 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1569 return ret; 1325 return ret;
@@ -1590,7 +1346,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1590 struct ieee80211_vif *vif = ctx->vif; 1346 struct ieee80211_vif *vif = ctx->vif;
1591 band = priv->band == IEEE80211_BAND_2GHZ; 1347 band = priv->band == IEEE80211_BAND_2GHZ;
1592 1348
1593 is_ht40 = is_ht40_channel(ctx->staging.flags); 1349 is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
1594 1350
1595 if (is_ht40 && 1351 if (is_ht40 &&
1596 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1352 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
@@ -1621,19 +1377,19 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1621 else { 1377 else {
1622 switch_time_in_usec = 1378 switch_time_in_usec =
1623 vif->bss_conf.beacon_int * switch_count * TIME_UNIT; 1379 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1624 ucode_switch_time = iwl_usecs_to_beacons(priv, 1380 ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
1625 switch_time_in_usec, 1381 switch_time_in_usec,
1626 beacon_interval); 1382 beacon_interval);
1627 cmd.switch_time = iwl_add_beacon_time(priv, 1383 cmd.switch_time = iwl_legacy_add_beacon_time(priv,
1628 priv->ucode_beacon_time, 1384 priv->ucode_beacon_time,
1629 ucode_switch_time, 1385 ucode_switch_time,
1630 beacon_interval); 1386 beacon_interval);
1631 } 1387 }
1632 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 1388 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1633 cmd.switch_time); 1389 cmd.switch_time);
1634 ch_info = iwl_get_channel_info(priv, priv->band, ch); 1390 ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
1635 if (ch_info) 1391 if (ch_info)
1636 cmd.expect_beacon = is_channel_radar(ch_info); 1392 cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
1637 else { 1393 else {
1638 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 1394 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1639 ctx->active.channel, ch); 1395 ctx->active.channel, ch);
@@ -1650,7 +1406,8 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1650 priv->switch_rxon.channel = cmd.channel; 1406 priv->switch_rxon.channel = cmd.channel;
1651 priv->switch_rxon.switch_in_progress = true; 1407 priv->switch_rxon.switch_in_progress = true;
1652 1408
1653 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1409 return iwl_legacy_send_cmd_pdu(priv,
1410 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1654} 1411}
1655 1412
1656/** 1413/**
@@ -1692,7 +1449,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1692 u32 R4; 1449 u32 R4;
1693 1450
1694 if (test_bit(STATUS_TEMPERATURE, &priv->status) && 1451 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1695 (priv->_agn.statistics.flag & 1452 (priv->_4965.statistics.flag &
1696 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { 1453 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1697 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); 1454 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1698 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); 1455 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
@@ -1717,7 +1474,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1717 if (!test_bit(STATUS_TEMPERATURE, &priv->status)) 1474 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1718 vt = sign_extend32(R4, 23); 1475 vt = sign_extend32(R4, 23);
1719 else 1476 else
1720 vt = sign_extend32(le32_to_cpu(priv->_agn.statistics. 1477 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1721 general.common.temperature), 23); 1478 general.common.temperature), 23);
1722 1479
1723 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); 1480 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -1802,7 +1559,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1802 } 1559 }
1803 1560
1804 priv->temperature = temp; 1561 priv->temperature = temp;
1805 iwl_tt_handler(priv);
1806 set_bit(STATUS_TEMPERATURE, &priv->status); 1562 set_bit(STATUS_TEMPERATURE, &priv->status);
1807 1563
1808 if (!priv->disable_tx_power_cal && 1564 if (!priv->disable_tx_power_cal &&
@@ -1811,152 +1567,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1811 queue_work(priv->workqueue, &priv->txpower_work); 1567 queue_work(priv->workqueue, &priv->txpower_work);
1812} 1568}
1813 1569
1814/**
1815 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
1816 */
1817static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
1818 u16 txq_id)
1819{
1820 /* Simply stop the queue, but don't change any configuration;
1821 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
1822 iwl_write_prph(priv,
1823 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1824 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1825 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1826}
1827
1828/**
1829 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
1830 * priv->lock must be held by the caller
1831 */
1832static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1833 u16 ssn_idx, u8 tx_fifo)
1834{
1835 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1836 (IWL49_FIRST_AMPDU_QUEUE +
1837 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1838 IWL_WARN(priv,
1839 "queue number out of range: %d, must be %d to %d\n",
1840 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1841 IWL49_FIRST_AMPDU_QUEUE +
1842 priv->cfg->base_params->num_of_ampdu_queues - 1);
1843 return -EINVAL;
1844 }
1845
1846 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1847
1848 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
1849
1850 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1851 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1852 /* supposes that ssn_idx is valid (!= 0xFFF) */
1853 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1854
1855 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
1856 iwl_txq_ctx_deactivate(priv, txq_id);
1857 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1858
1859 return 0;
1860}
1861
1862/**
1863 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1864 */
1865static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
1866 u16 txq_id)
1867{
1868 u32 tbl_dw_addr;
1869 u32 tbl_dw;
1870 u16 scd_q2ratid;
1871
1872 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1873
1874 tbl_dw_addr = priv->scd_base_addr +
1875 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
1876
1877 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
1878
1879 if (txq_id & 0x1)
1880 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1881 else
1882 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1883
1884 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
1885
1886 return 0;
1887}
1888
1889
1890/**
1891 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
1892 *
1893 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
1894 * i.e. it must be one of the higher queues used for aggregation
1895 */
1896static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1897 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
1898{
1899 unsigned long flags;
1900 u16 ra_tid;
1901 int ret;
1902
1903 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1904 (IWL49_FIRST_AMPDU_QUEUE +
1905 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1906 IWL_WARN(priv,
1907 "queue number out of range: %d, must be %d to %d\n",
1908 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1909 IWL49_FIRST_AMPDU_QUEUE +
1910 priv->cfg->base_params->num_of_ampdu_queues - 1);
1911 return -EINVAL;
1912 }
1913
1914 ra_tid = BUILD_RAxTID(sta_id, tid);
1915
1916 /* Modify device's station table to Tx this TID */
1917 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1918 if (ret)
1919 return ret;
1920
1921 spin_lock_irqsave(&priv->lock, flags);
1922
1923 /* Stop this Tx queue before configuring it */
1924 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1925
1926 /* Map receiver-address / traffic-ID to this queue */
1927 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1928
1929 /* Set this queue as a chain-building queue */
1930 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
1931
1932 /* Place first TFD at index corresponding to start sequence number.
1933 * Assumes that ssn_idx is valid (!= 0xFFF) */
1934 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1935 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1936 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1937
1938 /* Set up Tx window size and frame limit for this queue */
1939 iwl_write_targ_mem(priv,
1940 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
1941 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1942 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1943
1944 iwl_write_targ_mem(priv, priv->scd_base_addr +
1945 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1946 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
1947 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1948
1949 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
1950
1951 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
1952 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1953
1954 spin_unlock_irqrestore(&priv->lock, flags);
1955
1956 return 0;
1957}
1958
1959
1960static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) 1570static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1961{ 1571{
1962 switch (cmd_id) { 1572 switch (cmd_id) {
@@ -1967,7 +1577,8 @@ static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1967 } 1577 }
1968} 1578}
1969 1579
1970static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 1580static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1581 u8 *data)
1971{ 1582{
1972 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; 1583 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1973 addsta->mode = cmd->mode; 1584 addsta->mode = cmd->mode;
@@ -2020,16 +1631,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2020 status = le16_to_cpu(frame_status[0].status); 1631 status = le16_to_cpu(frame_status[0].status);
2021 idx = start_idx; 1632 idx = start_idx;
2022 1633
2023 /* FIXME: code repetition */
2024 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 1634 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
2025 agg->frame_count, agg->start_idx, idx); 1635 agg->frame_count, agg->start_idx, idx);
2026 1636
2027 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); 1637 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
2028 info->status.rates[0].count = tx_resp->failure_frame + 1; 1638 info->status.rates[0].count = tx_resp->failure_frame + 1;
2029 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1639 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
2030 info->flags |= iwl_tx_status_to_mac80211(status); 1640 info->flags |= iwl4965_tx_status_to_mac80211(status);
2031 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); 1641 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
2032 /* FIXME: code repetition end */
2033 1642
2034 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", 1643 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
2035 status & 0xff, tx_resp->failure_frame); 1644 status & 0xff, tx_resp->failure_frame);
@@ -2056,7 +1665,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2056 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", 1665 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
2057 agg->frame_count, txq_id, idx); 1666 agg->frame_count, txq_id, idx);
2058 1667
2059 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 1668 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
2060 if (!hdr) { 1669 if (!hdr) {
2061 IWL_ERR(priv, 1670 IWL_ERR(priv,
2062 "BUG_ON idx doesn't point to valid skb" 1671 "BUG_ON idx doesn't point to valid skb"
@@ -2107,15 +1716,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2107 return 0; 1716 return 0;
2108} 1717}
2109 1718
2110static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) 1719static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
2111{ 1720{
2112 int i; 1721 int i;
2113 int start = 0; 1722 int start = 0;
2114 int ret = IWL_INVALID_STATION; 1723 int ret = IWL_INVALID_STATION;
2115 unsigned long flags; 1724 unsigned long flags;
2116 1725
2117 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) || 1726 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
2118 (priv->iw_mode == NL80211_IFTYPE_AP))
2119 start = IWL_STA_ID; 1727 start = IWL_STA_ID;
2120 1728
2121 if (is_broadcast_ether_addr(addr)) 1729 if (is_broadcast_ether_addr(addr))
@@ -2151,13 +1759,13 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
2151 return ret; 1759 return ret;
2152} 1760}
2153 1761
2154static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) 1762static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2155{ 1763{
2156 if (priv->iw_mode == NL80211_IFTYPE_STATION) { 1764 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2157 return IWL_AP_ID; 1765 return IWL_AP_ID;
2158 } else { 1766 } else {
2159 u8 *da = ieee80211_get_DA(hdr); 1767 u8 *da = ieee80211_get_DA(hdr);
2160 return iwl_find_station(priv, da); 1768 return iwl4965_find_station(priv, da);
2161 } 1769 }
2162} 1770}
2163 1771
@@ -2182,7 +1790,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2182 u8 *qc = NULL; 1790 u8 *qc = NULL;
2183 unsigned long flags; 1791 unsigned long flags;
2184 1792
2185 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 1793 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
2186 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 1794 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
2187 "is out of range [0-%d] %d %d\n", txq_id, 1795 "is out of range [0-%d] %d %d\n", txq_id,
2188 index, txq->q.n_bd, txq->q.write_ptr, 1796 index, txq->q.n_bd, txq->q.write_ptr,
@@ -2194,13 +1802,13 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2194 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 1802 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
2195 memset(&info->status, 0, sizeof(info->status)); 1803 memset(&info->status, 0, sizeof(info->status));
2196 1804
2197 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); 1805 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
2198 if (ieee80211_is_data_qos(hdr->frame_control)) { 1806 if (ieee80211_is_data_qos(hdr->frame_control)) {
2199 qc = ieee80211_get_qos_ctl(hdr); 1807 qc = ieee80211_get_qos_ctl(hdr);
2200 tid = qc[0] & 0xf; 1808 tid = qc[0] & 0xf;
2201 } 1809 }
2202 1810
2203 sta_id = iwl_get_ra_sta_id(priv, hdr); 1811 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
2204 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { 1812 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2205 IWL_ERR(priv, "Station not known\n"); 1813 IWL_ERR(priv, "Station not known\n");
2206 return; 1814 return;
@@ -2217,51 +1825,52 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2217 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); 1825 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
2218 1826
2219 /* check if BAR is needed */ 1827 /* check if BAR is needed */
2220 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) 1828 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
2221 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1829 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
2222 1830
2223 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 1831 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2224 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 1832 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1833 txq->q.n_bd);
2225 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 1834 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2226 "%d index %d\n", scd_ssn , index); 1835 "%d index %d\n", scd_ssn , index);
2227 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 1836 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2228 if (qc) 1837 if (qc)
2229 iwl_free_tfds_in_queue(priv, sta_id, 1838 iwl4965_free_tfds_in_queue(priv, sta_id,
2230 tid, freed); 1839 tid, freed);
2231 1840
2232 if (priv->mac80211_registered && 1841 if (priv->mac80211_registered &&
2233 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 1842 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
2234 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) 1843 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
2235 iwl_wake_queue(priv, txq); 1844 iwl_legacy_wake_queue(priv, txq);
2236 } 1845 }
2237 } else { 1846 } else {
2238 info->status.rates[0].count = tx_resp->failure_frame + 1; 1847 info->status.rates[0].count = tx_resp->failure_frame + 1;
2239 info->flags |= iwl_tx_status_to_mac80211(status); 1848 info->flags |= iwl4965_tx_status_to_mac80211(status);
2240 iwlagn_hwrate_to_tx_control(priv, 1849 iwl4965_hwrate_to_tx_control(priv,
2241 le32_to_cpu(tx_resp->rate_n_flags), 1850 le32_to_cpu(tx_resp->rate_n_flags),
2242 info); 1851 info);
2243 1852
2244 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " 1853 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
2245 "rate_n_flags 0x%x retries %d\n", 1854 "rate_n_flags 0x%x retries %d\n",
2246 txq_id, 1855 txq_id,
2247 iwl_get_tx_fail_reason(status), status, 1856 iwl4965_get_tx_fail_reason(status), status,
2248 le32_to_cpu(tx_resp->rate_n_flags), 1857 le32_to_cpu(tx_resp->rate_n_flags),
2249 tx_resp->failure_frame); 1858 tx_resp->failure_frame);
2250 1859
2251 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 1860 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2252 if (qc && likely(sta_id != IWL_INVALID_STATION)) 1861 if (qc && likely(sta_id != IWL_INVALID_STATION))
2253 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 1862 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
2254 else if (sta_id == IWL_INVALID_STATION) 1863 else if (sta_id == IWL_INVALID_STATION)
2255 IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); 1864 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
2256 1865
2257 if (priv->mac80211_registered && 1866 if (priv->mac80211_registered &&
2258 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1867 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
2259 iwl_wake_queue(priv, txq); 1868 iwl_legacy_wake_queue(priv, txq);
2260 } 1869 }
2261 if (qc && likely(sta_id != IWL_INVALID_STATION)) 1870 if (qc && likely(sta_id != IWL_INVALID_STATION))
2262 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 1871 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
2263 1872
2264 iwl_check_abort_status(priv, tx_resp->frame_count, status); 1873 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
2265 1874
2266 spin_unlock_irqrestore(&priv->sta_lock, flags); 1875 spin_unlock_irqrestore(&priv->sta_lock, flags);
2267} 1876}
@@ -2271,8 +1880,8 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
2271{ 1880{
2272 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1881 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2273 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw; 1882 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
2274#ifdef CONFIG_IWLWIFI_DEBUG 1883 u8 rate __maybe_unused =
2275 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 1884 iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
2276 1885
2277 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " 1886 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
2278 "tsf:0x%.8x%.8x rate:%d\n", 1887 "tsf:0x%.8x%.8x rate:%d\n",
@@ -2281,79 +1890,24 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
2281 le32_to_cpu(beacon->ibss_mgr_status), 1890 le32_to_cpu(beacon->ibss_mgr_status),
2282 le32_to_cpu(beacon->high_tsf), 1891 le32_to_cpu(beacon->high_tsf),
2283 le32_to_cpu(beacon->low_tsf), rate); 1892 le32_to_cpu(beacon->low_tsf), rate);
2284#endif
2285 1893
2286 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 1894 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
2287
2288 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
2289 queue_work(priv->workqueue, &priv->beacon_update);
2290}
2291
2292static int iwl4965_calc_rssi(struct iwl_priv *priv,
2293 struct iwl_rx_phy_res *rx_resp)
2294{
2295 /* data from PHY/DSP regarding signal strength, etc.,
2296 * contents are always there, not configurable by host. */
2297 struct iwl4965_rx_non_cfg_phy *ncphy =
2298 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2299 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2300 >> IWL49_AGC_DB_POS;
2301
2302 u32 valid_antennae =
2303 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2304 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2305 u8 max_rssi = 0;
2306 u32 i;
2307
2308 /* Find max rssi among 3 possible receivers.
2309 * These values are measured by the digital signal processor (DSP).
2310 * They should stay fairly constant even as the signal strength varies,
2311 * if the radio's automatic gain control (AGC) is working right.
2312 * AGC value (see below) will provide the "interesting" info. */
2313 for (i = 0; i < 3; i++)
2314 if (valid_antennae & (1 << i))
2315 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2316
2317 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2318 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2319 max_rssi, agc);
2320
2321 /* dBm = max_rssi dB - agc dB - constant.
2322 * Higher AGC (higher radio gain) means lower signal. */
2323 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
2324} 1895}
2325 1896
2326
2327/* Set up 4965-specific Rx frame reply handlers */ 1897/* Set up 4965-specific Rx frame reply handlers */
2328static void iwl4965_rx_handler_setup(struct iwl_priv *priv) 1898static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
2329{ 1899{
2330 /* Legacy Rx frames */ 1900 /* Legacy Rx frames */
2331 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx; 1901 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
2332 /* Tx response */ 1902 /* Tx response */
2333 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; 1903 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
2334 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; 1904 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
2335
2336 /* set up notification wait support */
2337 spin_lock_init(&priv->_agn.notif_wait_lock);
2338 INIT_LIST_HEAD(&priv->_agn.notif_waits);
2339 init_waitqueue_head(&priv->_agn.notif_waitq);
2340}
2341
2342static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
2343{
2344 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
2345}
2346
2347static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2348{
2349 cancel_work_sync(&priv->txpower_work);
2350} 1905}
2351 1906
2352static struct iwl_hcmd_ops iwl4965_hcmd = { 1907static struct iwl_hcmd_ops iwl4965_hcmd = {
2353 .rxon_assoc = iwl4965_send_rxon_assoc, 1908 .rxon_assoc = iwl4965_send_rxon_assoc,
2354 .commit_rxon = iwl4965_commit_rxon, 1909 .commit_rxon = iwl4965_commit_rxon,
2355 .set_rxon_chain = iwlagn_set_rxon_chain, 1910 .set_rxon_chain = iwl4965_set_rxon_chain,
2356 .send_bt_config = iwl_send_bt_config,
2357}; 1911};
2358 1912
2359static void iwl4965_post_scan(struct iwl_priv *priv) 1913static void iwl4965_post_scan(struct iwl_priv *priv)
@@ -2365,7 +1919,7 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
2365 * performing the scan, fire one off if needed 1919 * performing the scan, fire one off if needed
2366 */ 1920 */
2367 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) 1921 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2368 iwlcore_commit_rxon(priv, ctx); 1922 iwl_legacy_commit_rxon(priv, ctx);
2369} 1923}
2370 1924
2371static void iwl4965_post_associate(struct iwl_priv *priv) 1925static void iwl4965_post_associate(struct iwl_priv *priv)
@@ -2378,29 +1932,24 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2378 if (!vif || !priv->is_open) 1932 if (!vif || !priv->is_open)
2379 return; 1933 return;
2380 1934
2381 if (vif->type == NL80211_IFTYPE_AP) {
2382 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2383 return;
2384 }
2385
2386 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1935 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2387 return; 1936 return;
2388 1937
2389 iwl_scan_cancel_timeout(priv, 200); 1938 iwl_legacy_scan_cancel_timeout(priv, 200);
2390 1939
2391 conf = ieee80211_get_hw_conf(priv->hw); 1940 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
2392 1941
2393 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1942 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2394 iwlcore_commit_rxon(priv, ctx); 1943 iwl_legacy_commit_rxon(priv, ctx);
2395 1944
2396 ret = iwl_send_rxon_timing(priv, ctx); 1945 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2397 if (ret) 1946 if (ret)
2398 IWL_WARN(priv, "RXON timing - " 1947 IWL_WARN(priv, "RXON timing - "
2399 "Attempting to continue.\n"); 1948 "Attempting to continue.\n");
2400 1949
2401 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 1950 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2402 1951
2403 iwl_set_rxon_ht(priv, &priv->current_ht_config); 1952 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2404 1953
2405 if (priv->cfg->ops->hcmd->set_rxon_chain) 1954 if (priv->cfg->ops->hcmd->set_rxon_chain)
2406 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 1955 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
@@ -2422,7 +1971,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2422 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 1971 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2423 } 1972 }
2424 1973
2425 iwlcore_commit_rxon(priv, ctx); 1974 iwl_legacy_commit_rxon(priv, ctx);
2426 1975
2427 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 1976 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2428 vif->bss_conf.aid, ctx->active.bssid_addr); 1977 vif->bss_conf.aid, ctx->active.bssid_addr);
@@ -2431,7 +1980,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2431 case NL80211_IFTYPE_STATION: 1980 case NL80211_IFTYPE_STATION:
2432 break; 1981 break;
2433 case NL80211_IFTYPE_ADHOC: 1982 case NL80211_IFTYPE_ADHOC:
2434 iwlagn_send_beacon_cmd(priv); 1983 iwl4965_send_beacon_cmd(priv);
2435 break; 1984 break;
2436 default: 1985 default:
2437 IWL_ERR(priv, "%s Should not be called in %d mode\n", 1986 IWL_ERR(priv, "%s Should not be called in %d mode\n",
@@ -2443,10 +1992,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2443 * If chain noise has already been run, then we need to enable 1992 * If chain noise has already been run, then we need to enable
2444 * power management here */ 1993 * power management here */
2445 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) 1994 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2446 iwl_power_update_mode(priv, false); 1995 iwl_legacy_power_update_mode(priv, false);
2447 1996
2448 /* Enable Rx differential gain and sensitivity calibrations */ 1997 /* Enable Rx differential gain and sensitivity calibrations */
2449 iwl_chain_noise_reset(priv); 1998 iwl4965_chain_noise_reset(priv);
2450 priv->start_calib = 1; 1999 priv->start_calib = 1;
2451} 2000}
2452 2001
@@ -2462,14 +2011,14 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2462 return; 2011 return;
2463 2012
2464 /* The following should be done only at AP bring up */ 2013 /* The following should be done only at AP bring up */
2465 if (!iwl_is_associated_ctx(ctx)) { 2014 if (!iwl_legacy_is_associated_ctx(ctx)) {
2466 2015
2467 /* RXON - unassoc (to set timing command) */ 2016 /* RXON - unassoc (to set timing command) */
2468 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2017 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2469 iwlcore_commit_rxon(priv, ctx); 2018 iwl_legacy_commit_rxon(priv, ctx);
2470 2019
2471 /* RXON Timing */ 2020 /* RXON Timing */
2472 ret = iwl_send_rxon_timing(priv, ctx); 2021 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2473 if (ret) 2022 if (ret)
2474 IWL_WARN(priv, "RXON timing failed - " 2023 IWL_WARN(priv, "RXON timing failed - "
2475 "Attempting to continue.\n"); 2024 "Attempting to continue.\n");
@@ -2477,7 +2026,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2477 /* AP has all antennas */ 2026 /* AP has all antennas */
2478 priv->chain_noise_data.active_chains = 2027 priv->chain_noise_data.active_chains =
2479 priv->hw_params.valid_rx_ant; 2028 priv->hw_params.valid_rx_ant;
2480 iwl_set_rxon_ht(priv, &priv->current_ht_config); 2029 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2481 if (priv->cfg->ops->hcmd->set_rxon_chain) 2030 if (priv->cfg->ops->hcmd->set_rxon_chain)
2482 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 2031 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2483 2032
@@ -2499,51 +2048,37 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2499 ~RXON_FLG_SHORT_SLOT_MSK; 2048 ~RXON_FLG_SHORT_SLOT_MSK;
2500 } 2049 }
2501 /* need to send beacon cmd before committing assoc RXON! */ 2050 /* need to send beacon cmd before committing assoc RXON! */
2502 iwlagn_send_beacon_cmd(priv); 2051 iwl4965_send_beacon_cmd(priv);
2503 /* restore RXON assoc */ 2052 /* restore RXON assoc */
2504 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2053 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2505 iwlcore_commit_rxon(priv, ctx); 2054 iwl_legacy_commit_rxon(priv, ctx);
2506 } 2055 }
2507 iwlagn_send_beacon_cmd(priv); 2056 iwl4965_send_beacon_cmd(priv);
2508
2509 /* FIXME - we need to add code here to detect a totally new
2510 * configuration, reset the AP, unassoc, rxon timing, assoc,
2511 * clear sta table, add BCAST sta... */
2512} 2057}
2513 2058
2514static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2059static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2515 .get_hcmd_size = iwl4965_get_hcmd_size, 2060 .get_hcmd_size = iwl4965_get_hcmd_size,
2516 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2061 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2517 .chain_noise_reset = iwl4965_chain_noise_reset, 2062 .request_scan = iwl4965_request_scan,
2518 .gain_computation = iwl4965_gain_computation,
2519 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2520 .calc_rssi = iwl4965_calc_rssi,
2521 .request_scan = iwlagn_request_scan,
2522 .post_scan = iwl4965_post_scan, 2063 .post_scan = iwl4965_post_scan,
2523}; 2064};
2524 2065
2525static struct iwl_lib_ops iwl4965_lib = { 2066static struct iwl_lib_ops iwl4965_lib = {
2526 .set_hw_params = iwl4965_hw_set_hw_params, 2067 .set_hw_params = iwl4965_hw_set_hw_params,
2527 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 2068 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
2528 .txq_set_sched = iwl4965_txq_set_sched, 2069 .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
2529 .txq_agg_enable = iwl4965_txq_agg_enable, 2070 .txq_free_tfd = iwl4965_hw_txq_free_tfd,
2530 .txq_agg_disable = iwl4965_txq_agg_disable, 2071 .txq_init = iwl4965_hw_tx_queue_init,
2531 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
2532 .txq_free_tfd = iwl_hw_txq_free_tfd,
2533 .txq_init = iwl_hw_tx_queue_init,
2534 .rx_handler_setup = iwl4965_rx_handler_setup, 2072 .rx_handler_setup = iwl4965_rx_handler_setup,
2535 .setup_deferred_work = iwl4965_setup_deferred_work,
2536 .cancel_deferred_work = iwl4965_cancel_deferred_work,
2537 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, 2073 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2538 .alive_notify = iwl4965_alive_notify,
2539 .init_alive_start = iwl4965_init_alive_start, 2074 .init_alive_start = iwl4965_init_alive_start,
2540 .load_ucode = iwl4965_load_bsm, 2075 .load_ucode = iwl4965_load_bsm,
2541 .dump_nic_event_log = iwl_dump_nic_event_log, 2076 .dump_nic_event_log = iwl4965_dump_nic_event_log,
2542 .dump_nic_error_log = iwl_dump_nic_error_log, 2077 .dump_nic_error_log = iwl4965_dump_nic_error_log,
2543 .dump_fh = iwl_dump_fh, 2078 .dump_fh = iwl4965_dump_fh,
2544 .set_channel_switch = iwl4965_hw_channel_switch, 2079 .set_channel_switch = iwl4965_hw_channel_switch,
2545 .apm_ops = { 2080 .apm_ops = {
2546 .init = iwl_apm_init, 2081 .init = iwl_legacy_apm_init,
2547 .config = iwl4965_nic_config, 2082 .config = iwl4965_nic_config,
2548 }, 2083 },
2549 .eeprom_ops = { 2084 .eeprom_ops = {
@@ -2556,64 +2091,56 @@ static struct iwl_lib_ops iwl4965_lib = {
2556 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, 2091 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2557 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS 2092 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
2558 }, 2093 },
2559 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 2094 .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
2560 .release_semaphore = iwlcore_eeprom_release_semaphore, 2095 .release_semaphore = iwl4965_eeprom_release_semaphore,
2561 .calib_version = iwl4965_eeprom_calib_version,
2562 .query_addr = iwlcore_eeprom_query_addr,
2563 }, 2096 },
2564 .send_tx_power = iwl4965_send_tx_power, 2097 .send_tx_power = iwl4965_send_tx_power,
2565 .update_chain_flags = iwl_update_chain_flags, 2098 .update_chain_flags = iwl4965_update_chain_flags,
2566 .isr_ops = {
2567 .isr = iwl_isr_legacy,
2568 },
2569 .temp_ops = { 2099 .temp_ops = {
2570 .temperature = iwl4965_temperature_calib, 2100 .temperature = iwl4965_temperature_calib,
2571 }, 2101 },
2572 .debugfs_ops = { 2102 .debugfs_ops = {
2573 .rx_stats_read = iwl_ucode_rx_stats_read, 2103 .rx_stats_read = iwl4965_ucode_rx_stats_read,
2574 .tx_stats_read = iwl_ucode_tx_stats_read, 2104 .tx_stats_read = iwl4965_ucode_tx_stats_read,
2575 .general_stats_read = iwl_ucode_general_stats_read, 2105 .general_stats_read = iwl4965_ucode_general_stats_read,
2576 .bt_stats_read = iwl_ucode_bt_stats_read,
2577 .reply_tx_error = iwl_reply_tx_error_read,
2578 }, 2106 },
2579 .check_plcp_health = iwl_good_plcp_health, 2107 .check_plcp_health = iwl4965_good_plcp_health,
2580}; 2108};
2581 2109
2582static const struct iwl_legacy_ops iwl4965_legacy_ops = { 2110static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2583 .post_associate = iwl4965_post_associate, 2111 .post_associate = iwl4965_post_associate,
2584 .config_ap = iwl4965_config_ap, 2112 .config_ap = iwl4965_config_ap,
2585 .manage_ibss_station = iwlagn_manage_ibss_station, 2113 .manage_ibss_station = iwl4965_manage_ibss_station,
2586 .update_bcast_stations = iwl_update_bcast_stations, 2114 .update_bcast_stations = iwl4965_update_bcast_stations,
2587}; 2115};
2588 2116
2589struct ieee80211_ops iwl4965_hw_ops = { 2117struct ieee80211_ops iwl4965_hw_ops = {
2590 .tx = iwlagn_mac_tx, 2118 .tx = iwl4965_mac_tx,
2591 .start = iwlagn_mac_start, 2119 .start = iwl4965_mac_start,
2592 .stop = iwlagn_mac_stop, 2120 .stop = iwl4965_mac_stop,
2593 .add_interface = iwl_mac_add_interface, 2121 .add_interface = iwl_legacy_mac_add_interface,
2594 .remove_interface = iwl_mac_remove_interface, 2122 .remove_interface = iwl_legacy_mac_remove_interface,
2595 .change_interface = iwl_mac_change_interface, 2123 .change_interface = iwl_legacy_mac_change_interface,
2596 .config = iwl_legacy_mac_config, 2124 .config = iwl_legacy_mac_config,
2597 .configure_filter = iwlagn_configure_filter, 2125 .configure_filter = iwl4965_configure_filter,
2598 .set_key = iwlagn_mac_set_key, 2126 .set_key = iwl4965_mac_set_key,
2599 .update_tkip_key = iwlagn_mac_update_tkip_key, 2127 .update_tkip_key = iwl4965_mac_update_tkip_key,
2600 .conf_tx = iwl_mac_conf_tx, 2128 .conf_tx = iwl_legacy_mac_conf_tx,
2601 .reset_tsf = iwl_legacy_mac_reset_tsf, 2129 .reset_tsf = iwl_legacy_mac_reset_tsf,
2602 .bss_info_changed = iwl_legacy_mac_bss_info_changed, 2130 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2603 .ampdu_action = iwlagn_mac_ampdu_action, 2131 .ampdu_action = iwl4965_mac_ampdu_action,
2604 .hw_scan = iwl_mac_hw_scan, 2132 .hw_scan = iwl_legacy_mac_hw_scan,
2605 .sta_add = iwlagn_mac_sta_add, 2133 .sta_add = iwl4965_mac_sta_add,
2606 .sta_remove = iwl_mac_sta_remove, 2134 .sta_remove = iwl_legacy_mac_sta_remove,
2607 .channel_switch = iwlagn_mac_channel_switch, 2135 .channel_switch = iwl4965_mac_channel_switch,
2608 .flush = iwlagn_mac_flush, 2136 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
2609 .tx_last_beacon = iwl_mac_tx_last_beacon,
2610}; 2137};
2611 2138
2612static const struct iwl_ops iwl4965_ops = { 2139static const struct iwl_ops iwl4965_ops = {
2613 .lib = &iwl4965_lib, 2140 .lib = &iwl4965_lib,
2614 .hcmd = &iwl4965_hcmd, 2141 .hcmd = &iwl4965_hcmd,
2615 .utils = &iwl4965_hcmd_utils, 2142 .utils = &iwl4965_hcmd_utils,
2616 .led = &iwlagn_led_ops, 2143 .led = &iwl4965_led_ops,
2617 .legacy = &iwl4965_legacy_ops, 2144 .legacy = &iwl4965_legacy_ops,
2618 .ieee80211_ops = &iwl4965_hw_ops, 2145 .ieee80211_ops = &iwl4965_hw_ops,
2619}; 2146};
@@ -2625,22 +2152,18 @@ static struct iwl_base_params iwl4965_base_params = {
2625 .pll_cfg_val = 0, 2152 .pll_cfg_val = 0,
2626 .set_l0s = true, 2153 .set_l0s = true,
2627 .use_bsm = true, 2154 .use_bsm = true,
2628 .use_isr_legacy = true,
2629 .broken_powersave = true,
2630 .led_compensation = 61, 2155 .led_compensation = 61,
2631 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2156 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2632 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2157 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2633 .wd_timeout = IWL_DEF_WD_TIMEOUT, 2158 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2634 .temperature_kelvin = true, 2159 .temperature_kelvin = true,
2635 .max_event_log_size = 512, 2160 .max_event_log_size = 512,
2636 .tx_power_by_driver = true,
2637 .ucode_tracing = true, 2161 .ucode_tracing = true,
2638 .sensitivity_calib_by_driver = true, 2162 .sensitivity_calib_by_driver = true,
2639 .chain_noise_calib_by_driver = true, 2163 .chain_noise_calib_by_driver = true,
2640 .no_agg_framecnt_info = true,
2641}; 2164};
2642 2165
2643struct iwl_cfg iwl4965_agn_cfg = { 2166struct iwl_cfg iwl4965_cfg = {
2644 .name = "Intel(R) Wireless WiFi Link 4965AGN", 2167 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2645 .fw_name_pre = IWL4965_FW_PRE, 2168 .fw_name_pre = IWL4965_FW_PRE,
2646 .ucode_api_max = IWL4965_UCODE_API_MAX, 2169 .ucode_api_max = IWL4965_UCODE_API_MAX,
@@ -2651,7 +2174,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2651 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2174 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2652 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, 2175 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2653 .ops = &iwl4965_ops, 2176 .ops = &iwl4965_ops,
2654 .mod_params = &iwlagn_mod_params, 2177 .mod_params = &iwl4965_mod_params,
2655 .base_params = &iwl4965_base_params, 2178 .base_params = &iwl4965_base_params,
2656 .led_mode = IWL_LED_BLINK, 2179 .led_mode = IWL_LED_BLINK,
2657 /* 2180 /*
@@ -2663,4 +2186,3 @@ struct iwl_cfg iwl4965_agn_cfg = {
2663 2186
2664/* Module firmware */ 2187/* Module firmware */
2665MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); 2188MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2666
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644
index 000000000000..01f8163daf16
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.h
@@ -0,0 +1,282 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
193const char *iwl4965_get_tx_fail_reason(u32 status);
194#else
195static inline const char *
196iwl4965_get_tx_fail_reason(u32 status) { return ""; }
197#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644
index 000000000000..17a1d504348e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -0,0 +1,3405 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_legacy_commands_h__
70#define __iwl_legacy_commands_h__
71
72struct iwl_priv;
73
74/* uCode version contains 4 values: Major/Minor/API/Serial */
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79
80
81/* Tx rates */
82#define IWL_CCK_RATES 4
83#define IWL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
85
86enum {
87 REPLY_ALIVE = 0x1,
88 REPLY_ERROR = 0x2,
89
90 /* RXON and QOS commands */
91 REPLY_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14,
95
96 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19,
99
100 /* Security */
101 REPLY_WEPKEY = 0x20,
102
103 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
109
110 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
115
116 /* Power Management */
117 POWER_TABLE_CMD = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
120
121 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81,
124 SCAN_START_NOTIFICATION = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84,
127
128 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90,
130 REPLY_TX_BEACON = 0x91,
131
132 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97,
134
135 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b,
137
138 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d,
141
142 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1,
144
145 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2,
147
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
149 SENSITIVITY_CMD = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1,
153 REPLY_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5,
155
156 REPLY_MAX = 0xff
157};
158
159/******************************************************************************
160 * (0)
161 * Commonly used structures and definitions:
162 * Command header, rate_n_flags, txpower
163 *
164 *****************************************************************************/
165
166/* iwl_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40
168
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175
176/**
177 * struct iwl_cmd_header
178 *
179 * This header format appears in the beginning of each command sent from the
180 * driver, and each response/notification received from uCode.
181 */
struct iwl_cmd_header {
	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
	u8 flags;	/* 0:5 reserved, 6 abort, 7 internal */
	/*
	 * The driver sets up the sequence number to values of its choosing.
	 * uCode does not use this value, but passes it back to the driver
	 * when sending the response to each driver-originated command, so
	 * the driver can match the response to the command. Since the values
	 * don't get used by uCode, the driver may set up an arbitrary format.
	 *
	 * There is one exception: uCode sets bit 15 when it originates
	 * the response/notification, i.e. when the response/notification
	 * is not a direct response to a command sent by the driver. For
	 * example, uCode issues REPLY_3945_RX when it sends a received frame
	 * to the driver; it is not a direct response to any driver command.
	 *
	 * The Linux driver uses the following format:
	 *
	 *  0:7  tfd index - position within TX queue
	 *  8:12 TX queue id
	 *  13   reserved
	 *  14   huge - driver sets this to indicate command is in the
	 *       'huge' storage at the end of the command buffers
	 *  15   unsolicited RX or uCode-originated notification
	 */
	__le16 sequence;

	/* command or response/notification data follows immediately */
	u8 data[0];	/* C89-style flexible array: payload is in-buffer */
} __packed;		/* packed: byte layout is shared with uCode */
212
213
214/**
215 * struct iwl3945_tx_power
216 *
217 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
218 *
219 * Each entry contains two values:
220 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
221 * linear value that multiplies the output of the digital signal processor,
222 * before being sent to the analog radio.
223 * 2) Radio gain. This sets the analog gain of the radio Tx path.
224 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
225 *
226 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
227 */
/* One gain pair from power_gain_table[][]; see struct comment above */
struct iwl3945_tx_power {
	u8 tx_gain;		/* gain for analog radio */
	u8 dsp_atten;		/* gain for DSP */
} __packed;
232
233/**
234 * struct iwl3945_power_per_rate
235 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
237 */
struct iwl3945_power_per_rate {
	u8 rate;	/* plcp */
	struct iwl3945_tx_power tpc;	/* radio + DSP gain for this rate */
	u8 reserved;	/* pad to 4 bytes */
} __packed;
243
244/**
245 * iwl4965 rate_n_flags bit fields
246 *
247 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only)
249 * REPLY_RX_MPDU (response only)
250 * REPLY_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD
252 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps
255 * 1) 12 Mbps
256 * 2) 18 Mbps
257 * 3) 24 Mbps
258 * 4) 36 Mbps
259 * 5) 48 Mbps
260 * 6) 54 Mbps
261 * 7) 60 Mbps
262 *
263 * 4-3: 0) Single stream (SISO)
264 * 1) Dual stream (MIMO)
265 * 2) Triple stream (MIMO)
266 *
267 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
268 *
269 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
270 * 3-0: 0xD) 6 Mbps
271 * 0xF) 9 Mbps
272 * 0x5) 12 Mbps
273 * 0x7) 18 Mbps
274 * 0x9) 24 Mbps
275 * 0xB) 36 Mbps
276 * 0x1) 48 Mbps
277 * 0x3) 54 Mbps
278 *
279 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
280 * 6-0: 10) 1 Mbps
281 * 20) 2 Mbps
282 * 55) 5.5 Mbps
283 * 110) 11 Mbps
284 */
285#define RATE_MCS_CODE_MSK 0x7
286#define RATE_MCS_SPATIAL_POS 3
287#define RATE_MCS_SPATIAL_MSK 0x18
288#define RATE_MCS_HT_DUP_POS 5
289#define RATE_MCS_HT_DUP_MSK 0x20
290
291/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
292#define RATE_MCS_FLAGS_POS 8
293#define RATE_MCS_HT_POS 8
294#define RATE_MCS_HT_MSK 0x100
295
296/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
297#define RATE_MCS_CCK_POS 9
298#define RATE_MCS_CCK_MSK 0x200
299
300/* Bit 10: (1) Use Green Field preamble */
301#define RATE_MCS_GF_POS 10
302#define RATE_MCS_GF_MSK 0x400
303
304/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
305#define RATE_MCS_HT40_POS 11
306#define RATE_MCS_HT40_MSK 0x800
307
308/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
309#define RATE_MCS_DUP_POS 12
310#define RATE_MCS_DUP_MSK 0x1000
311
312/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
313#define RATE_MCS_SGI_POS 13
314#define RATE_MCS_SGI_MSK 0x2000
315
316/**
317 * rate_n_flags Tx antenna masks
318 * 4965 has 2 transmitters
 * bits 14:16
320 */
321#define RATE_MCS_ANT_POS 14
322#define RATE_MCS_ANT_A_MSK 0x04000
323#define RATE_MCS_ANT_B_MSK 0x08000
324#define RATE_MCS_ANT_C_MSK 0x10000
325#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3
328
329#define POWER_TABLE_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32
332
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2
335
336/**
337 * union iwl4965_tx_power_dual_stream
338 *
339 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
340 * Use __le32 version (struct tx_power_dual_stream) when building command.
341 *
342 * Driver provides radio gain and DSP attenuation settings to device in pairs,
343 * one value for each transmitter chain. The first value is for transmitter A,
344 * second for transmitter B.
345 *
346 * For SISO bit rates, both values in a pair should be identical.
347 * For MIMO rates, one value may be different from the other,
348 * in order to balance the Tx output between the two transmitters.
349 *
350 * See more details in doc for TXPOWER in iwl-4965-hw.h.
351 */
union iwl4965_tx_power_dual_stream {
	struct {
		u8 radio_tx_gain[2];	/* analog gain: [0] chain A, [1] chain B */
		u8 dsp_predis_atten[2];	/* DSP attenuation: [0] chain A, [1] chain B */
	} s;
	u32 dw;		/* the same four bytes viewed as one 32-bit word */
};
359
360/**
361 * struct tx_power_dual_stream
362 *
363 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
364 *
 * Same format as union iwl4965_tx_power_dual_stream, but __le32
366 */
struct tx_power_dual_stream {
	/* union iwl4965_tx_power_dual_stream, as little-endian on the wire */
	__le32 dw;
} __packed;
370
371/**
372 * struct iwl4965_tx_power_db
373 *
374 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
375 */
struct iwl4965_tx_power_db {
	/* 33 entries: 32 HT/OFDM rates plus the CCK entry (index 32) */
	struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
} __packed;
379
380/******************************************************************************
381 * (0a)
382 * Alive and Error Commands & Responses:
383 *
384 *****************************************************************************/
385
386#define UCODE_VALID_OK cpu_to_le32(0x1)
387#define INITIALIZE_SUBTYPE (9)
388
389/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
391 *
392 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image.
394 * This is the *first* "alive" notification that the driver will receive after
395 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
396 *
397 * See comments documenting "BSM" (bootstrap state machine).
398 *
399 * For 4965, this notification contains important calibration data for
400 * calculating txpower settings:
401 *
402 * 1) Power supply voltage indication. The voltage sensor outputs higher
 *     values for lower voltage, and vice versa.
404 *
405 * 2) Temperature measurement parameters, for each of two channel widths
406 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
407 * is done via one of the receiver chains, and channel width influences
408 * the results.
409 *
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges.
412 */
/* "initialize" alive: identified by ver_subtype == INITIALIZE_SUBTYPE (9) */
struct iwl_init_alive_resp {
	u8 ucode_minor;
	u8 ucode_major;
	__le16 reserved1;
	u8 sw_rev[8];
	u8 ver_type;
	u8 ver_subtype;			/* "9" for initialize alive */
	__le16 reserved2;
	__le32 log_event_table_ptr;	/* SRAM address of event log */
	__le32 error_event_table_ptr;	/* SRAM address of error log */
	__le32 timestamp;
	__le32 is_valid;

	/* calibration values from "initialize" uCode */
	__le32 voltage;		/* signed, higher value is lower voltage */
	__le32 therm_r1[2];	/* signed, 1st for normal, 2nd for HT40 */
	__le32 therm_r2[2];	/* signed */
	__le32 therm_r3[2];	/* signed */
	__le32 therm_r4[2];	/* signed */
	__le32 tx_atten[5][2];	/* signed MIMO gain comp, 5 freq groups,
				 * 2 Tx chains */
} __packed;
435
436
437/**
438 * REPLY_ALIVE = 0x1 (response only, not a command)
439 *
440 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive"
442 * notification that the driver will receive after rebooting uCode;
443 * this "alive" is indicated by subtype field != 9.
444 *
445 * See comments documenting "BSM" (bootstrap state machine).
446 *
447 * This response includes two pointers to structures within the device's
448 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
449 *
450 * 1) log_event_table_ptr indicates base of the event log. This traces
451 * a 256-entry history of uCode execution within a circular buffer.
452 * Its header format is:
453 *
454 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill
458 *
459 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format:
461 *
462 * __le32 event_id; range 0 - 1500
463 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
464 * __le32 data; event_id-specific data value
465 *
466 * Entries without timestamps contain only event_id and data.
467 *
468 *
469 * 2) error_event_table_ptr indicates base of the error log. This contains
470 * information about any uCode error that occurs. For 4965, the format
471 * of the error log is:
472 *
473 * __le32 valid; (nonzero) valid, (0) log is empty
474 * __le32 error_id; type of error
475 * __le32 pc; program counter
476 * __le32 blink1; branch link
477 * __le32 blink2; branch link
478 * __le32 ilink1; interrupt link
479 * __le32 ilink2; interrupt link
480 * __le32 data1; error-specific data
481 * __le32 data2; error-specific data
482 * __le32 line; source code line of error
483 * __le32 bcon_time; beacon timer
484 * __le32 tsf_low; network timestamp function timer
485 * __le32 tsf_hi; network timestamp function timer
486 * __le32 gp1; GP1 timer register
487 * __le32 gp2; GP2 timer register
488 * __le32 gp3; GP3 timer register
489 * __le32 ucode_ver; uCode version
490 * __le32 hw_ver; HW Silicon version
491 * __le32 brd_ver; HW board version
492 * __le32 log_pc; log program counter
493 * __le32 frame_ptr; frame pointer
494 * __le32 stack_ptr; stack pointer
495 * __le32 hcmd; last host command
496 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
497 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
498 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
499 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
500 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
501 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
502 * __le32 wait_event; wait event() caller address
503 * __le32 l2p_control; L2pControlField
504 * __le32 l2p_duration; L2pDurationField
505 * __le32 l2p_mhvalid; L2pMhValidBits
506 * __le32 l2p_addr_match; L2pAddrMatchStat
507 * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
 *	__le32 u_timestamp;	date and time of the uCode compilation
509 * __le32 reserved;
510 *
511 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs.
513 */
/* runtime alive: same leading layout as iwl_init_alive_resp, subtype != 9 */
struct iwl_alive_resp {
	u8 ucode_minor;
	u8 ucode_major;
	__le16 reserved1;
	u8 sw_rev[8];
	u8 ver_type;
	u8 ver_subtype;			/* not "9" for runtime alive */
	__le16 reserved2;
	__le32 log_event_table_ptr;	/* SRAM address for event log */
	__le32 error_event_table_ptr;	/* SRAM address for error log */
	__le32 timestamp;
	__le32 is_valid;
} __packed;
527
528/*
529 * REPLY_ERROR = 0x2 (response only, not a command)
530 */
/* uCode-originated error notification (REPLY_ERROR, 0x2) */
struct iwl_error_resp {
	__le32 error_type;
	u8 cmd_id;		/* presumably ID of the offending command -- TODO confirm */
	u8 reserved1;
	__le16 bad_cmd_seq_num;	/* sequence field of the offending command */
	__le32 error_info;	/* error-type-specific data */
	__le64 timestamp;
} __packed;
539
540/******************************************************************************
541 * (1)
542 * RXON Commands & Responses:
543 *
544 *****************************************************************************/
545
546/*
547 * Rx config defines & structure
548 */
549/* rx_config device types */
/* values for the dev_type field of the RXON commands below */
enum {
	RXON_DEV_TYPE_AP = 1,
	RXON_DEV_TYPE_ESS = 3,
	RXON_DEV_TYPE_IBSS = 4,
	RXON_DEV_TYPE_SNIFFER = 6,	/* note: 2 and 5 are not defined here */
};
556
557
558#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
559#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
560#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
561#define RXON_RX_CHAIN_VALID_POS (1)
562#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
563#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
564#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
565#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
566#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
567#define RXON_RX_CHAIN_CNT_POS (10)
568#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
569#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
570#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
571#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
572
573/* rx_config flags */
574/* band & modulation selection */
575#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
576#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
577/* auto detection enable */
578#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
579/* TGg protection when tx */
580#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
581/* cck short slot & preamble */
582#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
583#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
584/* antenna selection */
585#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
586#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
587#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
588#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
589/* radar detection enable */
590#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
591#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
592/* rx response to host with 8-byte TSF
593* (according to ON_AIR deassertion) */
594#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
595
596
597/* HT flags */
598#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
599#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
600
601#define RXON_FLG_HT_OPERATING_MODE_POS (23)
602
603#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
604#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
605
606#define RXON_FLG_CHANNEL_MODE_POS (25)
607#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
608
609/* channel mode */
/* channel mode: field values for RXON_FLG_CHANNEL_MODE_MSK (flags bits 26:25) */
enum {
	CHANNEL_MODE_LEGACY = 0,
	CHANNEL_MODE_PURE_40 = 1,
	CHANNEL_MODE_MIXED = 2,
	CHANNEL_MODE_RESERVED = 3,	/* reserved value */
};
616#define RXON_FLG_CHANNEL_MODE_LEGACY \
617 cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
618#define RXON_FLG_CHANNEL_MODE_PURE_40 \
619 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
620#define RXON_FLG_CHANNEL_MODE_MIXED \
621 cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
622
623/* CTS to self (if spec allows) flag */
624#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
625
626/* rx_config filter flags */
627/* accept all data frames */
628#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
629/* pass control & management to host */
630#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
631/* accept multi-cast */
632#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
633/* don't decrypt uni-cast frames */
634#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
635/* don't decrypt multi-cast frames */
636#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
637/* STA is associated */
638#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
639/* transfer to host non bssid beacons in associated state */
640#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
641
642/**
643 * REPLY_RXON = 0x10 (command, has simple generic response)
644 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations.
647 *
648 * NOTE: When tuning to a new channel, driver must set the
649 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
650 * info within the device, including the station tables, tx retry
651 * rate tables, and txpower tables. Driver must build a new station
652 * table and txpower table before transmitting anything on the RXON
653 * channel.
654 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */
659
/* 3945 RXON body: no rx_chain or HT rate fields (cf. iwl4965_rxon_cmd) */
struct iwl3945_rxon_cmd {
	u8 node_addr[6];	/* this station's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* RXON_DEV_TYPE_* */
	u8 air_propagation;
	__le16 reserved4;
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 assoc_id;
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;
	__le16 reserved5;
} __packed;
678
/* 4965 RXON body: adds rx_chain and HT basic-rate fields vs. 3945 */
struct iwl4965_rxon_cmd {
	u8 node_addr[6];	/* this station's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* RXON_DEV_TYPE_* */
	u8 air_propagation;
	__le16 rx_chain;	/* RXON_RX_CHAIN_* */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 assoc_id;
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
} __packed;
698
699/* Create a common rxon cmd which will be typecast into the 3945 or 4965
700 * specific rxon cmd, depending on where it is called from.
701 */
struct iwl_legacy_rxon_cmd {
	u8 node_addr[6];	/* this station's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* RXON_DEV_TYPE_* */
	u8 air_propagation;
	__le16 rx_chain;	/* 4965 only; reserved on 3945 */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 assoc_id;
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;
	u8 ofdm_ht_single_stream_basic_rates;	/* 4965 only */
	u8 ofdm_ht_dual_stream_basic_rates;	/* 4965 only */
	/* NOTE(review): trailing pad, presumably so casting to either
	 * device-specific struct stays in bounds -- TODO confirm */
	u8 reserved4;
	u8 reserved5;
} __packed;
723
724
725/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */
/* 3945 variant: updates rates/flags without a full RXON */
struct iwl3945_rxon_assoc_cmd {
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 reserved;
} __packed;
735
/* 4965 variant: additionally carries HT rates and rx chain selection */
struct iwl4965_rxon_assoc_cmd {
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
	__le16 rx_chain_select_flags;	/* RXON_RX_CHAIN_* */
	__le16 reserved;
} __packed;
746
747#define IWL_CONN_MAX_LISTEN_INTERVAL 10
748#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
749#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
750
751/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
753 */
/* Beacon/DTIM timing parameters for the current RXON */
struct iwl_rxon_time_cmd {
	__le64 timestamp;
	__le16 beacon_interval;	/* see IWL*_MAX_UCODE_BEACON_INTERVAL above */
	__le16 atim_window;
	__le32 beacon_init_val;
	__le16 listen_interval;	/* capped by IWL_CONN_MAX_LISTEN_INTERVAL */
	u8 dtim_period;
	u8 delta_cp_bss_tbtts;
} __packed;
763
764/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */
/* 3945 channel switch: carries per-rate txpower for the new channel */
struct iwl3945_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;		/* new channel number */
	__le32 rxon_flags;	/* RXON_FLG_* for the new channel */
	__le32 rxon_filter_flags;
	__le32 switch_time;
	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
} __packed;
776
/* 4965 channel switch: carries the full txpower table for the new channel */
struct iwl4965_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;		/* new channel number */
	__le32 rxon_flags;	/* RXON_FLG_* for the new channel */
	__le32 rxon_filter_flags;
	__le32 switch_time;
	struct iwl4965_tx_power_db tx_power;
} __packed;
786
787/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
789 */
/* uCode's result notification for a channel switch */
struct iwl_csa_notification {
	__le16 band;
	__le16 channel;
	__le32 status;		/* 0 - OK, 1 - fail */
} __packed;
795
796/******************************************************************************
797 * (2)
798 * Quality-of-Service (QOS) Commands & Responses:
799 *
800 *****************************************************************************/
801
802/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
805 *
806 * @cw_min: Contention window, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 *
814 * Device will automatically increase contention window by (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value.
817 */
struct iwl_ac_qos {
	__le16 cw_min;		/* contention window start; power-of-2 minus 1 */
	__le16 cw_max;		/* contention window cap; power-of-2 minus 1 */
	u8 aifsn;		/* arbitration interframe space, in slots */
	u8 reserved1;
	__le16 edca_txop;	/* Tx opportunity length, in uSecs */
} __packed;
825
826/* QoS flags defines */
827#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
828#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
829#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
830
831/* Number of Access Categories (AC) (EDCA), queues 0..3 */
832#define AC_NUM 4
833
834/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
836 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */
struct iwl_qosparam_cmd {
	__le32 qos_flags;	/* QOS_PARAM_FLG_* */
	/* one entry per EDCA FIFO: 0 BK, 1 BE, 2 VI, 3 VO */
	struct iwl_ac_qos ac[AC_NUM];
} __packed;
844
845/******************************************************************************
846 * (3)
847 * Add/Modify Stations Commands & Responses:
848 *
849 *****************************************************************************/
850/*
851 * Multi station support
852 */
853
854/* Special, dedicated locations within device's station table */
855#define IWL_AP_ID 0
856#define IWL_STA_ID 2
857#define IWL3945_BROADCAST_ID 24
858#define IWL3945_STATION_COUNT 25
859#define IWL4965_BROADCAST_ID 31
860#define IWL4965_STATION_COUNT 32
861
862#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
863#define IWL_INVALID_STATION 255
864
865#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
866#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
867#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
868#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
869#define STA_FLG_MAX_AGG_SIZE_POS (19)
870#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
871#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
872#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
873#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
874#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
875
876/* Use in mode field. 1: modify existing entry, 0: add new station entry */
877#define STA_CONTROL_MODIFY_MSK 0x01
878
879/* key flags __le16*/
880#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
881#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
882#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
883#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
884#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
885
886#define STA_KEY_FLG_KEYID_POS 8
887#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
888/* wep key is either from global key (0) or from station info array (1) */
889#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
890
891/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
892#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
893#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
894#define STA_KEY_MAX_NUM 8
895
896/* Flags indicate whether to modify vs. don't change various station params */
897#define STA_MODIFY_KEY_MASK 0x01
898#define STA_MODIFY_TID_DISABLE_TX 0x02
899#define STA_MODIFY_TX_RATE_MSK 0x04
900#define STA_MODIFY_ADDBA_TID_MSK 0x08
901#define STA_MODIFY_DELBA_TID_MSK 0x10
902#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
903
904/* Receiver address (actually, Rx station's index into station table),
905 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
906#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
907
/* Security key material; embedded in the REPLY_ADD_STA command bodies */
struct iwl4965_keyinfo {
	__le16 key_flags;	/* STA_KEY_FLG_* */
	u8 tkip_rx_tsc_byte2;	/* TSC[2] for key mix ph1 detection */
	u8 reserved1;
	__le16 tkip_rx_ttak[5];	/* 10-byte unicast TKIP TTAK */
	u8 key_offset;
	u8 reserved2;
	u8 key[16];		/* 16-byte unicast decryption key */
} __packed;
917
918/**
919 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 *
924 * Driver selects unused table index when adding new station,
925 * or the index to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
927 *
928 * modify_mask flags select which parameters to modify vs. leave alone.
929 */
struct sta_id_modify {
	u8 addr[ETH_ALEN];	/* station's MAC address */
	__le16 reserved1;
	u8 sta_id;		/* index into uCode's station table */
	u8 modify_mask;		/* STA_MODIFY_*: 1 modify, 0 don't change */
	__le16 reserved2;
} __packed;
937
938/*
939 * REPLY_ADD_STA = 0x18 (command)
940 *
941 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
946 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one.
949 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table
951 * clean. Moving into RF_KILL state does this also. Driver must set up
952 * new station table before transmitting anything on the RXON channel
953 * (except active scans or active measurements; those commands carry
954 * their own txpower/rate setup data).
955 *
956 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID.
963 */
964
/* 3945 REPLY_ADD_STA body: has rate_n_flags where 4965 has reserved1 */
struct iwl3945_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;
	struct iwl4965_keyinfo key;
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;

	__le16 rate_n_flags;	/* 3945-specific; absent on 4965 */

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;
} __packed;
992
/* 4965 REPLY_ADD_STA body: adds sleep_tx_count; no rate_n_flags */
struct iwl4965_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;
	struct iwl4965_keyinfo key;
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;

	__le16 reserved1;	/* rate_n_flags position in the 3945 layout */

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;

	/*
	 * Number of packets OK to transmit to station even though
	 * it is asleep -- used to synchronise PS-poll and u-APSD
	 * responses while ucode keeps track of STA sleep state.
	 * Set modify_mask bit STA_MODIFY_SLEEP_TX_COUNT_MSK to use it.
	 */
	__le16 sleep_tx_count;

	__le16 reserved2;
} __packed;
1029
1030/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
/* Superset layout covering both the 3945 and 4965 addsta_cmd variants */
struct iwl_legacy_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;
	struct iwl4965_keyinfo key;
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;

	__le16 rate_n_flags;	/* 3945 only */

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;

	/*
	 * Number of packets OK to transmit to station even though
	 * it is asleep -- used to synchronise PS-poll and u-APSD
	 * responses while ucode keeps track of STA sleep state.
	 * 4965 only.
	 */
	__le16 sleep_tx_count;

	__le16 reserved2;
} __packed;
1067
1068
1069#define ADD_STA_SUCCESS_MSK 0x1
1070#define ADD_STA_NO_ROOM_IN_TABLE 0x2
1071#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1072#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1073/*
1074 * REPLY_ADD_STA = 0x18 (response)
1075 */
struct iwl_add_sta_resp {
	u8 status;	/* ADD_STA_* status codes above */
} __packed;
1079
1080#define REM_STA_SUCCESS_MSK 0x1
1081/*
1082 * REPLY_REM_STA = 0x19 (response)
1083 */
struct iwl_rem_sta_resp {
	u8 status;	/* REM_STA_SUCCESS_MSK on success */
} __packed;
1087
1088/*
1089 * REPLY_REM_STA = 0x19 (command)
1090 */
struct iwl_rem_sta_cmd {
	u8 num_sta;		/* number of removed stations */
	u8 reserved[3];
	u8 addr[ETH_ALEN];	/* MAC addr of the first station */
	u8 reserved2[2];
} __packed;
1097
1098#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1099#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1100#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1101#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1102#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1103
1104#define IWL_DROP_SINGLE 0
1105#define IWL_DROP_SELECTED 1
1106#define IWL_DROP_ALL 2
1107
1108/*
1109 * REPLY_WEP_KEY = 0x20
1110 */
/* One WEP key entry within struct iwl_wep_cmd */
struct iwl_wep_key {
	u8 key_index;
	u8 key_offset;
	u8 reserved1[2];
	u8 key_size;	/* presumably WEP_KEY_LEN_64 (5) or _128 (13) -- TODO confirm */
	u8 reserved2[3];
	u8 key[16];	/* key material; only key_size bytes meaningful */
} __packed;
1119
struct iwl_wep_cmd {
	u8 num_keys;		/* number of entries in key[] below */
	u8 global_key_type;	/* e.g. WEP_KEY_WEP_TYPE */
	u8 flags;
	u8 reserved;
	u8 key[0];		/* actually num_keys x struct iwl_wep_key */
} __packed;
1127
1128#define WEP_KEY_WEP_TYPE 1
1129#define WEP_KEYS_MAX 4
1130#define WEP_INVALID_OFFSET 0xff
1131#define WEP_KEY_LEN_64 5
1132#define WEP_KEY_LEN_128 13
1133
1134/******************************************************************************
1135 * (4)
1136 * Rx Responses:
1137 *
1138 *****************************************************************************/
1139
/* Rx status word: frame-integrity bits */
#define RX_RES_STATUS_NO_CRC32_ERROR	cpu_to_le32(1 << 0)
#define RX_RES_STATUS_NO_RXE_OVERFLOW	cpu_to_le32(1 << 1)

/* Rx phy_flags (__le16): band, modulation, preamble, antenna selection */
#define RX_RES_PHY_FLAGS_BAND_24_MSK		cpu_to_le16(1 << 0)
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK		cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK	cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK	cpu_to_le16(1 << 3)
#define RX_RES_PHY_FLAGS_ANTENNA_MSK		0xf0	/* antenna field, bits 4..7 */
#define RX_RES_PHY_FLAGS_ANTENNA_POS		4

/* Rx status word: security (cipher) type used for the frame, bits 8..10 */
#define RX_RES_STATUS_SEC_TYPE_MSK	(0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE	(0x0 << 8)
#define RX_RES_STATUS_SEC_TYPE_WEP	(0x1 << 8)
#define RX_RES_STATUS_SEC_TYPE_CCMP	(0x2 << 8)
#define RX_RES_STATUS_SEC_TYPE_TKIP	(0x3 << 8)
#define RX_RES_STATUS_SEC_TYPE_ERR	(0x7 << 8)

#define RX_RES_STATUS_STATION_FOUND	(1<<6)
#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH	(1<<7)

/* Rx status word: decryption outcome, bits 11..12 */
#define RX_RES_STATUS_DECRYPT_TYPE_MSK	(0x3 << 11)
#define RX_RES_STATUS_NOT_DECRYPT	(0x0 << 11)
#define RX_RES_STATUS_DECRYPT_OK	(0x3 << 11)
#define RX_RES_STATUS_BAD_ICV_MIC	(0x1 << 11)
#define RX_RES_STATUS_BAD_KEY_TTAK	(0x2 << 11)

/* per-MPDU decryption status bits */
#define RX_MPDU_RES_STATUS_ICV_OK	(0x20)
#define RX_MPDU_RES_STATUS_MIC_OK	(0x40)
#define RX_MPDU_RES_STATUS_TTAK_OK	(1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK	(0x800)
1170
1171
/* Per-frame Rx phy statistics (3945); trailing payload[] holds phy data */
struct iwl3945_rx_frame_stats {
	u8 phy_count;		/* # bytes of phy data in payload[]; this makes
				 * the offsets of hdr/end below dynamic */
	u8 id;
	u8 rssi;		/* received signal strength indicator */
	u8 agc;			/* automatic gain control value */
	__le16 sig_avg;
	__le16 noise_diff;
	u8 payload[0];		/* phy_count bytes of phy data */
} __packed;

/* Rx frame header (3945); follows the variable-length stats block */
struct iwl3945_rx_frame_hdr {
	__le16 channel;		/* channel the frame was received on */
	__le16 phy_flags;	/* RX_RES_PHY_FLAGS_* */
	u8 reserved1;
	u8 rate;
	__le16 len;		/* byte count of the frame in payload[] */
	u8 payload[0];		/* the received frame itself */
} __packed;

/* Rx frame trailer (3945); follows the received frame data */
struct iwl3945_rx_frame_end {
	__le32 status;		/* RX_RES_STATUS_* bits */
	__le64 timestamp;
	__le32 beacon_timestamp;
} __packed;

/*
 * REPLY_3945_RX = 0x1b (response only, not a command)
 *
 * NOTE: DO NOT dereference from casts to this structure
 * It is provided only for calculating minimum data set size.
 * The actual offsets of the hdr and end are dynamic based on
 * stats.phy_count
 */
struct iwl3945_rx_frame {
	struct iwl3945_rx_frame_stats stats;
	struct iwl3945_rx_frame_hdr hdr;
	struct iwl3945_rx_frame_end end;
} __packed;

/* minimum Rx frame size: 4 bytes (presumably the Rx command header —
 * confirm against the rx path) plus the fixed struct above */
#define IWL39_RX_FRAME_SIZE	(4 + sizeof(struct iwl3945_rx_frame))
1212
/* Fixed (non-configurable) rx data from phy */

#define IWL49_RX_RES_PHY_CNT			14
#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET	(4)
#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK	(0x70)
#define IWL49_AGC_DB_MASK			(0x3f80)	/* MASK(7,13) */
#define IWL49_AGC_DB_POS			(7)

/* Non-configurable DSP phy data, one layout used inside
 * iwl_rx_phy_res.non_cfg_phy_buf[] (4965 devices). */
struct iwl4965_rx_non_cfg_phy {
	__le16 ant_selection;	/* ant A bit 4, ant B bit 5, ant C bit 6 */
	__le16 agc_info;	/* agc code 0:6, agc dB 7:13, reserved 14:15 */
	u8 rssi_info[6];	/* we use even entries, 0/2/4 for A/B/C rssi */
	u8 pad[0];
} __packed;


/*
 * REPLY_RX = 0xc3 (response only, not a command)
 * Used only for legacy (non 11n) frames.
 */
struct iwl_rx_phy_res {
	u8 non_cfg_phy_cnt;	/* non configurable DSP phy data byte count */
	u8 cfg_phy_cnt;		/* configurable DSP phy data byte count */
	u8 stat_id;		/* configurable DSP phy data set ID */
	u8 reserved1;
	__le64 timestamp;	/* TSF at on air rise */
	__le32 beacon_time_stamp; /* beacon at on-air rise */
	__le16 phy_flags;	/* general phy flags: band, modulation, ... */
	__le16 channel;		/* channel number */
	u8 non_cfg_phy_buf[32];	/* for various implementations of non_cfg_phy */
	__le32 rate_n_flags;	/* RATE_MCS_* */
	__le16 byte_count;	/* frame's byte-count */
	__le16 frame_time;	/* frame's time on the air */
} __packed;

/* Header preceding an Rx MPDU; NOTE(review): byte_count presumably sizes
 * the MPDU data that follows — confirm against the rx handling code. */
struct iwl_rx_mpdu_res_start {
	__le16 byte_count;
	__le16 reserved;
} __packed;
1251
1252
1253/******************************************************************************
1254 * (5)
1255 * Tx Commands & Responses:
1256 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI
1261 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1262 * from which data will be transmitted.
1263 *
1264 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA.
1268 *
1269 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965).
1273 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1275 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/
1277
/* REPLY_TX Tx flags field */

/* NOTE: all TX_CMD_FLG_* masks below apply to the __le32 tx_flags field of
 * the REPLY_TX commands (struct iwl3945_tx_cmd / struct iwl_tx_cmd). */

/*
 * 1: Use Request-To-Send protocol before this frame.
 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
 */
#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)

/*
 * 1: Transmit Clear-To-Send to self before this frame.
 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
 */
#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)

/* 1: Expect ACK from receiving station
 * 0: Don't expect ACK (MAC header's duration field s/b 0)
 * Set this for unicast frames, but not broadcast/multicast. */
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)

/* For 4965 devices:
 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
 *    Tx command's initial_rate_index indicates first rate to try;
 *    uCode walks through table for additional Tx attempts.
 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
 *    This rate will be used for all Tx attempts; it will not be scaled. */
#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)

/* 1: Expect immediate block-ack.
 * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)

/*
 * 1: Frame requires full Tx-Op protection.
 * Set this if either RTS or CTS Tx Flag gets set.
 */
#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)

/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)

/* 1: uCode overrides sequence control field in MAC header.
 * 0: Driver provides sequence control field in MAC header.
 * Set this for management frames, non-QOS data frames, non-unicast frames,
 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)

/* 1: This frame is non-last MPDU; more fragments are coming.
 * 0: Last fragment, or not using fragmentation. */
#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)

/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
 * 0: No TSF required in outgoing frame.
 * Set this for transmitting beacons and probe responses. */
#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)

/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
 *    alignment of frame's payload data field.
 * 0: No pad
 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
 * field (but not both).  Driver must align frame data (i.e. data following
 * MAC header) to DWORD boundary. */
#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)

/* accelerate aggregation support
 * 0 - no CCMP encryption; 1 - CCMP encryption */
#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)

/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351
1352
/*
 * TX command security control (values for the Tx command's sec_ctl field)
 */
#define TX_CMD_SEC_WEP		0x01	/* cipher selector, low 2 bits */
#define TX_CMD_SEC_CCM		0x02
#define TX_CMD_SEC_TKIP		0x03
#define TX_CMD_SEC_MSK		0x03	/* mask for the cipher selector */
#define TX_CMD_SEC_SHIFT	6	/* NOTE(review): appears to position the
					 * WEP key index within sec_ctl — confirm */
#define TX_CMD_SEC_KEY128	0x08	/* NOTE(review): presumably selects the
					 * 128-bit WEP key length — confirm */

/*
 * security overhead sizes (bytes added to the frame by each cipher)
 */
#define WEP_IV_LEN	4	/* WEP initialization vector */
#define WEP_ICV_LEN	4	/* WEP integrity check value */
#define CCMP_MIC_LEN	8	/* CCMP message integrity code */
#define TKIP_ICV_LEN	4	/* TKIP integrity check value */
1370
/*
 * REPLY_TX = 0x1c (command)
 */

struct iwl3945_tx_cmd {
	/*
	 * MPDU byte count:
	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
	 * + 8 byte IV for CCM or TKIP (not used for WEP)
	 * + Data payload
	 * + 8-byte MIC (not used for CCM/WEP)
	 * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
	 *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
	 * Range: 14-2342 bytes.
	 */
	__le16 len;

	/*
	 * MPDU or MSDU byte count for next frame.
	 * Used for fragmentation and bursting, but not 11n aggregation.
	 * Same as "len", but for next frame.  Set to 0 if not applicable.
	 */
	__le16 next_frame_len;

	__le32 tx_flags;	/* TX_CMD_FLG_* */

	u8 rate;		/* Tx rate for this frame */

	/* Index of recipient station in uCode's station table */
	u8 sta_id;
	u8 tid_tspec;		/* traffic identifier / TSPEC */
	u8 sec_ctl;		/* TX_CMD_SEC_* security control */
	u8 key[16];		/* cipher key material */
	union {
		u8 byte[8];
		__le16 word[4];
		__le32 dw[2];
	} tkip_mic;		/* TKIP MIC, addressable at three widths */
	__le32 next_frame_info;
	union {
		__le32 life_time;
		__le32 attempt;
	} stop_time;		/* give-up criterion: lifetime or attempt count */
	u8 supp_rates[2];
	u8 rts_retry_limit;	/*byte 50 */
	u8 data_retry_limit;	/*byte 51 */
	union {
		__le16 pm_frame_timeout;
		__le16 attempt_duration;
	} timeout;

	/*
	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
	 */
	__le16 driver_txop;

	/*
	 * MAC header goes here, followed by 2 bytes padding if MAC header
	 * length is 26 or 30 bytes, followed by payload data
	 */
	u8 payload[0];
	struct ieee80211_hdr hdr[0];	/* overlays payload[]: MAC-header view */
} __packed;
1435
/*
 * REPLY_TX = 0x1c (response)
 */
struct iwl3945_tx_resp {
	u8 failure_rts;		/* # failures due to unsuccessful RTS */
	u8 failure_frame;	/* # failures due to no ACK */
	u8 bt_kill_count;	/* # Tx attempts blocked by Bluetooth */
	u8 rate;		/* rate used for the transmission */
	__le32 wireless_media_time;	/* time spent on the air */
	__le32 status;		/* TX status */
} __packed;


/*
 * 4965 uCode updates these Tx attempt count values in host DRAM.
 * Used for managing Tx retries when expecting block-acks.
 * Driver should set these fields to 0.
 */
struct iwl_dram_scratch {
	u8 try_cnt;		/* Tx attempts */
	u8 bt_kill_cnt;		/* Tx attempts blocked by Bluetooth device */
	__le16 reserved;
} __packed;
1459
struct iwl_tx_cmd {
	/*
	 * MPDU byte count:
	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
	 * + 8 byte IV for CCM or TKIP (not used for WEP)
	 * + Data payload
	 * + 8-byte MIC (not used for CCM/WEP)
	 * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
	 *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
	 * Range: 14-2342 bytes.
	 */
	__le16 len;

	/*
	 * MPDU or MSDU byte count for next frame.
	 * Used for fragmentation and bursting, but not 11n aggregation.
	 * Same as "len", but for next frame.  Set to 0 if not applicable.
	 */
	__le16 next_frame_len;

	__le32 tx_flags;	/* TX_CMD_FLG_* */

	/* uCode may modify this field of the Tx command (in host DRAM!).
	 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
	struct iwl_dram_scratch scratch;

	/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
	__le32 rate_n_flags;	/* RATE_MCS_* */

	/* Index of destination station in uCode's station table */
	u8 sta_id;

	/* Type of security encryption: CCM or TKIP */
	u8 sec_ctl;		/* TX_CMD_SEC_* */

	/*
	 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
	 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
	 * data frames, this field may be used to selectively reduce initial
	 * rate (via non-0 value) for special frames (e.g. management), while
	 * still supporting rate scaling for all frames.
	 */
	u8 initial_rate_index;
	u8 reserved;
	u8 key[16];		/* cipher key material */
	__le16 next_frame_flags;
	__le16 reserved2;
	union {
		__le32 life_time;
		__le32 attempt;
	} stop_time;		/* give-up criterion: lifetime or attempt count */

	/* Host DRAM physical address pointer to "scratch" in this command.
	 * Must be dword aligned.  "0" in dram_lsb_ptr disables usage. */
	__le32 dram_lsb_ptr;
	u8 dram_msb_ptr;	/* high bits of the scratch DRAM address */

	u8 rts_retry_limit;	/*byte 50 */
	u8 data_retry_limit;	/*byte 51 */
	u8 tid_tspec;		/* traffic identifier / TSPEC */
	union {
		__le16 pm_frame_timeout;
		__le16 attempt_duration;
	} timeout;

	/*
	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
	 */
	__le16 driver_txop;

	/*
	 * MAC header goes here, followed by 2 bytes padding if MAC header
	 * length is 26 or 30 bytes, followed by payload data
	 */
	u8 payload[0];
	struct ieee80211_hdr hdr[0];	/* overlays payload[]: MAC-header view */
} __packed;
1538
1539/* TX command response is sent after *3945* transmission attempts.
1540 *
1541 * NOTES:
1542 *
1543 * TX_STATUS_FAIL_NEXT_FRAG
1544 *
1545 * If the fragment flag in the MAC header for the frame being transmitted
1546 * is set and there is insufficient time to transmit the next frame, the
1547 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
1548 *
1549 * TX_STATUS_FIFO_UNDERRUN
1550 *
1551 * Indicates the host did not provide bytes to the FIFO fast enough while
1552 * a TX was in progress.
1553 *
1554 * TX_STATUS_FAIL_MGMNT_ABORT
1555 *
1556 * This status is only possible if the ABORT ON MGMT RX parameter was
1557 * set to true with the TX command.
1558 *
1559 * If the MSB of the status parameter is set then an abort sequence is
1560 * required. This sequence consists of the host activating the TX Abort
1561 * control line, and then waiting for the TX Abort command response. This
 * indicates that the device is no longer in a transmit state, and that the
1563 * command FIFO has been cleared. The host must then deactivate the TX Abort
1564 * control line. Receiving is still allowed in this case.
1565 */
/* 3945 Tx completion status codes; values with the MSB (0x80) set indicate
 * failure and may require the abort sequence described above. */
enum {
	TX_3945_STATUS_SUCCESS = 0x01,
	TX_3945_STATUS_DIRECT_DONE = 0x02,
	TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
	TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
	TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
	TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
	TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
	TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
	TX_3945_STATUS_FAIL_DEST_PS = 0x88,
	TX_3945_STATUS_FAIL_ABORTED = 0x89,
	TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
	TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
	TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
	TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
	TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
	TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
	TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
	TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
};
1586
1587/*
1588 * TX command response is sent after *4965* transmission attempts.
1589 *
1590 * both postpone and abort status are expected behavior from uCode. there is
1591 * no special operation required from driver; except for RFKILL_FLUSH,
1592 * which required tx flush host command to flush all the tx frames in queues
1593 */
/* 4965 Tx completion status codes (see note above: postpone/abort are
 * normal uCode behavior; only RFKILL_FLUSH needs driver action). */
enum {
	TX_STATUS_SUCCESS = 0x01,
	TX_STATUS_DIRECT_DONE = 0x02,
	/* postpone TX */
	TX_STATUS_POSTPONE_DELAY = 0x40,
	TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
	TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
	TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
	/* abort TX */
	TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
	TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
	TX_STATUS_FAIL_LONG_LIMIT = 0x83,
	TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
	TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
	TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,	/* needs tx flush host command */
	TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
	TX_STATUS_FAIL_DEST_PS = 0x88,
	TX_STATUS_FAIL_HOST_ABORTED = 0x89,
	TX_STATUS_FAIL_BT_RETRY = 0x8a,
	TX_STATUS_FAIL_STA_INVALID = 0x8b,
	TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
	TX_STATUS_FAIL_TID_DISABLE = 0x8d,
	TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
	TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
	TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
	TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
};
1621
/* packet-mode values (occupy the TX_PACKET_MODE_MSK bits of the status word) */
#define TX_PACKET_MODE_REGULAR		0x0000
#define TX_PACKET_MODE_BURST_SEQ	0x0100
#define TX_PACKET_MODE_BURST_FIRST	0x0200

enum {
	TX_POWER_PA_NOT_ACTIVE = 0x0,	/* presumably: no PA detect measurement */
};

/* bit-field layout of the 32-bit Tx status word */
enum {
	TX_STATUS_MSK = 0x000000ff,		/* bits 0:7 */
	TX_STATUS_DELAY_MSK = 0x00000040,
	TX_STATUS_ABORT_MSK = 0x00000080,
	TX_PACKET_MODE_MSK = 0x0000ff00,	/* bits 8:15 */
	TX_FIFO_NUMBER_MSK = 0x00070000,	/* bits 16:18 */
	TX_RESERVED = 0x00780000,		/* bits 19:22 */
	TX_POWER_PA_DETECT_MSK = 0x7f800000,	/* bits 23:30 */
	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
};
1640
/* *******************************
 * TX aggregation status
 ******************************* */

/* per-frame aggregation Tx state; reported in the low 12 bits of
 * struct agg_tx_status.status */
enum {
	AGG_TX_STATE_TRANSMITTED = 0x00,
	AGG_TX_STATE_UNDERRUN_MSK = 0x01,
	AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
	AGG_TX_STATE_ABORT_MSK = 0x08,
	AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
	AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
	AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
	AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
	AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
	AGG_TX_STATE_DUMP_TX_MSK = 0x200,
	AGG_TX_STATE_DELAY_TX_MSK = 0x400
};

#define AGG_TX_STATUS_MSK	0x00000fff	/* bits 0:11 */
#define AGG_TX_TRY_MSK		0x0000f000	/* bits 12:15 */

/* either of the "last sent" conditions */
#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
				     AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)

/* # tx attempts for first frame in aggregation */
#define AGG_TX_STATE_TRY_CNT_POS 12
#define AGG_TX_STATE_TRY_CNT_MSK 0xf000

/* Command ID and sequence number of Tx command for this frame */
#define AGG_TX_STATE_SEQ_NUM_POS 16
#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1672
/*
 * REPLY_TX = 0x1c (response)
 *
 * This response may be in one of two slightly different formats, indicated
 * by the frame_count field:
 *
 * 1)  No aggregation (frame_count == 1).  This reports Tx results for
 *     a single frame.  Multiple attempts, at various bit rates, may have
 *     been made for this frame.
 *
 * 2)  Aggregation (frame_count > 1).  This reports Tx results for
 *     2 or more frames that used block-acknowledge.  All frames were
 *     transmitted at same rate.  Rate scaling may have been used if first
 *     frame in this new agg block failed in previous agg block(s).
 *
 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
 * block-ack has not been received by the time the 4965 device records
 * this status.
 * This status relates to reasons the tx might have been blocked or aborted
 * within the sending station (this 4965 device), rather than whether it was
 * received successfully by the destination station.
 */

/* one per aggregated frame; bit layout is documented on iwl4965_tx_resp.u */
struct agg_tx_status {
	__le16 status;		/* AGG_TX_STATE_* + try count */
	__le16 sequence;	/* Tx cmd sequence # (see AGG_TX_STATE_SEQ_NUM_*) */
} __packed;

struct iwl4965_tx_resp {
	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
	u8 bt_kill_count;	/* # blocked by bluetooth (unused for agg) */
	u8 failure_rts;		/* # failures due to unsuccessful RTS */
	u8 failure_frame;	/* # failures due to no ACK (unused for agg) */

	/* For non-agg:  Rate at which frame was successful.
	 * For agg:  Rate at which all frames were transmitted. */
	__le32 rate_n_flags;	/* RATE_MCS_*  */

	/* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
	 * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
	__le16 wireless_media_time;	/* uSecs */

	__le16 reserved;
	__le32 pa_power1;	/* RF power amplifier measurement (not used) */
	__le32 pa_power2;

	/*
	 * For non-agg:  frame status TX_STATUS_*
	 * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
	 *           fields follow this one, up to frame_count.
	 *           Bit fields:
	 *           11- 0:  AGG_TX_STATE_* status code
	 *           15-12:  Retry count for 1st frame in aggregation (retries
	 *                   occur if tx failed for this frame when it was a
	 *                   member of a previous aggregation block).  If rate
	 *                   scaling is used, retry count indicates the rate
	 *                   table entry used for all frames in the new agg.
	 *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
	 */
	union {
		__le32 status;
		struct agg_tx_status agg_status[0]; /* for each agg frame */
	} u;
} __packed;
1736
/*
 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
 *
 * Reports Block-Acknowledge from recipient station
 */
struct iwl_compressed_ba_resp {
	__le32 sta_addr_lo32;	/* low 4 bytes of BA-sender's MAC address */
	__le16 sta_addr_hi16;	/* high 2 bytes of BA-sender's MAC address */
	__le16 reserved;

	/* Index of recipient (BA-sending) station in uCode's station table */
	u8 sta_id;
	u8 tid;			/* traffic identifier of the BA session */
	__le16 seq_ctl;
	__le64 bitmap;		/* block-ack bitmap of acknowledged frames */
	__le16 scd_flow;	/* NOTE(review): presumably the scheduler
				 * flow/queue id — confirm */
	__le16 scd_ssn;		/* NOTE(review): presumably the scheduler
				 * start sequence number — confirm */
} __packed;
1755
/*
 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
 *
 * See details under "TXPOWER" in iwl-4965-hw.h.
 */

/* 3945 variant: per-rate power entries for one channel */
struct iwl3945_txpowertable_cmd {
	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
	u8 reserved;
	__le16 channel;		/* channel this table applies to */
	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
} __packed;

/* 4965 variant: complete tx power db for one channel */
struct iwl4965_txpowertable_cmd {
	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
	u8 reserved;
	__le16 channel;		/* channel this table applies to */
	struct iwl4965_tx_power_db tx_power;
} __packed;
1775
1776
/**
 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
 *
 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
 *
 * NOTE: The table of rates passed to the uCode via the
 * RATE_SCALE command sets up the corresponding order of
 * rates used for all related commands, including rate
 * masks, etc.
 *
 * For example, if you set 9MB (PLCP 0x0f) as the first
 * rate in the rate table, the bit mask for that rate
 * when passed through ofdm_basic_rates on the REPLY_RXON
 * command would be bit 0 (1 << 0)
 */

/* one retry-table entry: rate, # of tries, and where to go next */
struct iwl3945_rate_scaling_info {
	__le16 rate_n_flags;	/* rate + flags for this entry */
	u8 try_cnt;		/* # Tx attempts at this rate */
	u8 next_rate_index;	/* table index to fall back to afterwards */
} __packed;

struct iwl3945_rate_scaling_cmd {
	u8 table_id;
	u8 reserved[3];
	struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
} __packed;
1803
1804
/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK	(1 << 0)

/* # of EDCA prioritized tx fifos */
#define LINK_QUAL_AC_NUM AC_NUM

/* # entries in rate scale table to support Tx retries */
#define LINK_QUAL_MAX_RETRY_NUM 16

/* Tx antenna selection values */
#define LINK_QUAL_ANT_A_MSK	(1 << 0)
#define LINK_QUAL_ANT_B_MSK	(1 << 1)
#define LINK_QUAL_ANT_MSK	(LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)	/* both antennas */
1818
1819
/**
 * struct iwl_link_qual_general_params
 *
 * Used in REPLY_TX_LINK_QUALITY_CMD
 */
struct iwl_link_qual_general_params {
	u8 flags;		/* LINK_QUAL_FLAGS_* */

	/* No entries at or above this (driver chosen) index contain MIMO */
	u8 mimo_delimiter;

	/* Best single antenna to use for single stream (legacy, SISO). */
	u8 single_stream_ant_msk;	/* LINK_QUAL_ANT_* */

	/* Best antennas to use for MIMO (unused for 4965, assumes both). */
	u8 dual_stream_ant_msk;		/* LINK_QUAL_ANT_* */

	/*
	 * If driver needs to use different initial rates for different
	 * EDCA QOS access categories (as implemented by tx fifos 0-3),
	 * this table will set that up, by indicating the indexes in the
	 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
	 * Otherwise, driver should set all entries to 0.
	 *
	 * Entry usage:
	 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
	 */
	u8 start_rate_index[LINK_QUAL_AC_NUM];
} __packed;
1850
/* default/bounds for agg_time_limit below */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(100)

/* default/bounds for agg_dis_start_th below */
#define LINK_QUAL_AGG_DISABLE_START_DEF	(3)
#define LINK_QUAL_AGG_DISABLE_START_MAX	(255)
#define LINK_QUAL_AGG_DISABLE_START_MIN	(0)

/* default/bounds for agg_frame_cnt_limit below */
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(31)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)

/**
 * struct iwl_link_qual_agg_params
 *
 * Used in REPLY_TX_LINK_QUALITY_CMD
 */
struct iwl_link_qual_agg_params {

	/*
	 * Maximum number of uSec in aggregation.
	 * default set to 4000 (4 milliseconds) if not configured in .cfg
	 */
	__le16 agg_time_limit;

	/*
	 * Number of Tx retries allowed for a frame, before that frame will
	 * no longer be considered for the start of an aggregation sequence
	 * (scheduler will then try to tx it as single frame).
	 * Driver should set this to 3.
	 */
	u8 agg_dis_start_th;

	/*
	 * Maximum number of frames in aggregation.
	 * 0 = no limit (default).  1 = no aggregation.
	 * Other values = max # frames in aggregation.
	 */
	u8 agg_frame_cnt_limit;

	__le32 reserved;
} __packed;
1893
1894/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
1898 *
1899 * Each station in the 4965 device's internal station table has its own table
1900 * of 16
1901 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
1902 * an ACK is not received. This command replaces the entire table for
1903 * one station.
1904 *
1905 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA.
1907 *
1908 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments.
1910 *
1911 *
1912 * FILLING THE RATE TABLE
1913 *
1914 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following
1916 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1917 * Link Quality command:
1918 *
1919 *
1920 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1921 * a) Use this same initial rate for first 3 entries.
1922 * b) Find next lower available rate using same mode (SISO or MIMO),
1923 * use for next 3 entries. If no lower rate available, switch to
1924 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1925 * c) If using MIMO, set command's mimo_delimiter to number of entries
1926 * using MIMO (3 or 6).
1927 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1928 * no MIMO, no short guard interval), at the next lower bit rate
1929 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1930 * legacy procedure for remaining table entries.
1931 *
1932 * 2) If using legacy initial rate:
1933 * a) Use the initial rate for only one entry.
1934 * b) For each following entry, reduce the rate to next lower available
1935 * rate, until reaching the lowest available rate.
1936 * c) When reducing rate, also switch antenna selection.
1937 * d) Once lowest available rate is reached, repeat this rate until
1938 * rate table is filled (16 entries), switching antenna each entry.
1939 *
1940 *
1941 * ACCUMULATING HISTORY
1942 *
1943 * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
1944 * uses two sets of frame Tx success history: One for the current/active
1945 * modulation mode, and one for a speculative/search mode that is being
1946 * attempted. If the speculative mode turns out to be more effective (i.e.
1947 * actual transfer rate is better), then the driver continues to use the
1948 * speculative mode as the new current active mode.
1949 *
1950 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window.
1958 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation
1961 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1962 * up in the rate scaling table in the Link Quality command. The driver must
1963 * determine which rate table entry was used for each tx attempt, to determine
1964 * which rate-specific history to update, and record only those attempts that
1965 * match the modulation characteristics of the history set.
1966 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1970 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once.
1972 *
1973 *
1974 * FINDING BEST STARTING RATE:
1975 *
1976 * When working with a selected initial modulation mode (see below), the
1977 * driver attempts to find a best initial rate. The initial rate is the
1978 * first entry in the Link Quality command's rate table.
1979 *
1980 * 1) Calculate actual throughput (success ratio * expected throughput, see
1981 * table below) for current initial rate. Do this only if enough frames
1982 * have been attempted to make the value meaningful: at least 6 failed
1983 * tx attempts, or at least 8 successes. If not enough, don't try rate
1984 * scaling yet.
1985 *
1986 * 2) Find available rates adjacent to current initial rate. Available means:
1987 * a) supported by hardware &&
1988 * b) supported by association &&
1989 * c) within any constraints selected by user
1990 *
1991 * 3) Gather measured throughputs for adjacent rates. These might not have
1992 * enough history to calculate a throughput. That's okay, we might try
1993 * using one of them anyway!
1994 *
1995 * 4) Try decreasing rate if, for current rate:
1996 * a) success ratio is < 15% ||
1997 * b) lower adjacent rate has better measured throughput ||
1998 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1999 *
2000 * As a sanity check, if decrease was determined above, leave rate
2001 * unchanged if:
2002 * a) lower rate unavailable
2003 * b) success ratio at current rate > 85% (very good)
2004 * c) current measured throughput is better than expected throughput
2005 * of lower rate (under perfect 100% tx conditions, see table below)
2006 *
 * 5)  Try increasing rate if, for current rate:
 *     a)  success ratio is < 15% ||
 *     b)  both adjacent rates' throughputs are unmeasured (try it!) ||
 *     c)  higher adjacent rate has better measured throughput ||
 *     d)  lower adjacent rate has worse throughput, and higher is unmeasured
2012 *
2013 * As a sanity check, if increase was determined above, leave rate
2014 * unchanged if:
2015 * a) success ratio at current rate < 70%. This is not particularly
2016 * good performance; higher rate is sure to have poorer success.
2017 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire
2020 * block (including prior history that fits within the history windows),
2021 * before re-evaluation.
2022 *
2023 * FINDING BEST STARTING MODULATION MODE:
2024 *
2025 * After working with a modulation mode for a "while" (and doing rate scaling),
2026 * the driver searches for a new initial mode in an attempt to improve
2027 * throughput. The "while" is measured by numbers of attempted frames:
2028 *
2029 * For legacy mode, search for new mode after:
2030 * 480 successful frames, or 160 failed frames
2031 * For high-throughput modes (SISO or MIMO), search for new mode after:
2032 * 4500 successful frames, or 400 failed frames
2033 *
2034 * Mode switch possibilities are (3 for each mode):
2035 *
2036 * For legacy:
2037 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
2038 * For SISO:
2039 * Change antenna, try MIMO, try shortened guard interval (SGI)
2040 * For MIMO:
2041 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
2042 *
2043 * When trying a new mode, use the same bit rate as the old/current mode when
2044 * trying antenna switches and shortened guard interval. When switching to
2045 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
2046 * for which the expected throughput (under perfect conditions) is about the
2047 * same or slightly better than the actual measured throughput delivered by
2048 * the old/current mode.
2049 *
2050 * Actual throughput can be estimated by multiplying the expected throughput
2051 * by the success ratio (successful / attempted tx frames). Frame size is
2052 * not considered in this calculation; it assumes that frame size will average
2053 * out to be fairly consistent over several samples. The following are
2054 * metric values for expected throughput assuming 100% success ratio.
2055 * Only G band has support for CCK rates:
2056 *
2057 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
2058 *
2059 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
2060 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
2061 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
2062 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
2063 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
2064 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
2065 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
2066 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
2067 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
2068 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
2069 *
2070 * After the new mode has been tried for a short while (minimum of 6 failed
2071 * frames or 8 successful frames), compare success ratio and actual throughput
2072 * estimate of the new mode with the old. If either is better with the new
2073 * mode, continue to use the new mode.
2074 *
2075 * Continue comparing modes until all 3 possibilities have been tried.
2076 * If moving from legacy to HT, try all 3 possibilities from the new HT
2077 * mode. After trying all 3, a best mode is found. Continue to use this mode
2078 * for the longer "while" described above (e.g. 480 successful frames for
2079 * legacy), and then repeat the search process.
2080 *
2081 */
/*
 * Link Quality command: per-station rate-scaling setup.
 * rs_table[0] holds the initial Tx rate (see rate-scaling discussion
 * above); uCode works its way down the table when retrying Tx.
 */
struct iwl_link_quality_cmd {

	/* Index of destination/recipient station in uCode's station table */
	u8 sta_id;
	u8 reserved1;
	__le16 control;		/* not used */
	struct iwl_link_qual_general_params general_params;
	struct iwl_link_qual_agg_params agg_params;

	/*
	 * Rate info; when using rate-scaling, Tx command's initial_rate_index
	 * specifies 1st Tx rate attempted, via index into this table.
	 * 4965 devices works its way through table when retrying Tx.
	 */
	struct {
		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
	__le32 reserved2;
} __packed;
2101
/*
 * BT configuration enable flags:
 *   bit 0 - 1: BT channel announcement enabled
 *           0: disable
 *   bit 1 - 1: priority of BT device enabled
 *           0: disable
 */
#define BT_COEX_DISABLE			(0x0)
#define BT_ENABLE_CHANNEL_ANNOUNCE	BIT(0)
#define BT_ENABLE_PRIORITY		BIT(1)

/* both flags set = full BT coexistence */
#define BT_COEX_ENABLE	(BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)

#define BT_LEAD_TIME_DEF	(0x1E)

#define BT_MAX_KILL_DEF		(0x5)

/*
 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
 *
 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
 * same platform. Bluetooth device alerts wireless device when it will Tx;
 * wireless device can delay or kill its own Tx to accommodate.
 */
struct iwl_bt_cmd {
	u8 flags;		/* BT_COEX_DISABLE or BT_ENABLE_* flags */
	u8 lead_time;		/* presumably BT_LEAD_TIME_DEF by default --
				 * confirm against driver setup code */
	u8 max_kill;		/* presumably BT_MAX_KILL_DEF by default */
	u8 reserved;
	__le32 kill_ack_mask;
	__le32 kill_cts_mask;
} __packed;
2134
2135
2136/******************************************************************************
2137 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2139 *
2140 *****************************************************************************/
2141
/*
 * Spectrum Management
 */
#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK         | \
				 RXON_FILTER_CTL2HOST_MSK        | \
				 RXON_FILTER_ACCEPT_GRP_MSK      | \
				 RXON_FILTER_DIS_DECRYPT_MSK     | \
				 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
				 RXON_FILTER_ASSOC_MSK           | \
				 RXON_FILTER_BCON_AWARE_MSK)

/* One channel's worth of measurement parameters (up to 10 per command) */
struct iwl_measure_channel {
	__le32 duration;	/* measurement duration in extended beacon
				 * format */
	u8 channel;		/* channel to measure */
	u8 type;		/* see enum iwl_measure_type */
	__le16 reserved;
} __packed;

/*
 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
 */
struct iwl_spectrum_cmd {
	__le16 len;		/* number of bytes starting from token */
	u8 token;		/* token id */
	u8 id;			/* measurement id -- 0 or 1 */
	u8 origin;		/* 0 = TGh, 1 = other, 2 = TGk */
	u8 periodic;		/* 1 = periodic */
	__le16 path_loss_timeout;
	__le32 start_time;	/* start time in extended beacon format */
	__le32 reserved2;
	__le32 flags;		/* rxon flags */
	__le32 filter_flags;	/* rxon filter flags */
	__le16 channel_count;	/* minimum 1, maximum 10 */
	__le16 reserved3;
	struct iwl_measure_channel channels[10];
} __packed;

/*
 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
 */
struct iwl_spectrum_resp {
	u8 token;
	u8 id;			/* id of the prior command replaced, or 0xff */
	__le16 status;		/* 0 - command will be handled
				 * 1 - cannot handle (conflicts with another
				 *     measurement) */
} __packed;

enum iwl_measurement_state {
	IWL_MEASUREMENT_START = 0,
	IWL_MEASUREMENT_STOP = 1,
};

enum iwl_measurement_status {
	IWL_MEASUREMENT_OK = 0,
	IWL_MEASUREMENT_CONCURRENT = 1,
	IWL_MEASUREMENT_CSA_CONFLICT = 2,
	IWL_MEASUREMENT_TGH_CONFLICT = 3,
	/* 4-5 reserved */
	IWL_MEASUREMENT_STOPPED = 6,
	IWL_MEASUREMENT_TIMEOUT = 7,
	IWL_MEASUREMENT_PERIODIC_FAILED = 8,
};

#define NUM_ELEMENTS_IN_HISTOGRAM 8

struct iwl_measurement_histogram {
	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
} __packed;

/* clear channel availability counters */
struct iwl_measurement_cca_counters {
	__le32 ofdm;
	__le32 cck;
} __packed;

/* bit flags for iwl_measure_channel.type */
enum iwl_measure_type {
	IWL_MEASURE_BASIC = (1 << 0),
	IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
	IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
	IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
	IWL_MEASURE_FRAME = (1 << 4),
	/* bits 5:6 are reserved */
	IWL_MEASURE_IDLE = (1 << 7),
};

/*
 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
 */
struct iwl_spectrum_notification {
	u8 id;			/* measurement id -- 0 or 1 */
	u8 token;
	u8 channel_index;	/* index in measurement channel list */
	u8 state;		/* 0 - start, 1 - stop */
	__le32 start_time;	/* lower 32-bits of TSF */
	u8 band;		/* 0 - 5.2GHz, 1 - 2.4GHz */
	u8 channel;
	u8 type;		/* see enum iwl_measurement_type */
	u8 reserved1;
	/* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
	 * valid if applicable for measurement type requested. */
	__le32 cca_ofdm;	/* cca fraction time in 40Mhz clock periods */
	__le32 cca_cck;		/* cca fraction time in 44Mhz clock periods */
	__le32 cca_time;	/* channel load time in usecs */
	u8 basic_type;		/* 0 - bss, 1 - ofdm preamble, 2 -
				 * unidentified */
	u8 reserved2[3];
	struct iwl_measurement_histogram histogram;
	__le32 stop_time;	/* lower 32-bits of TSF */
	__le32 status;		/* see iwl_measurement_status */
} __packed;
2255
2256/******************************************************************************
2257 * (7)
2258 * Power Management Commands, Responses, Notifications:
2259 *
2260 *****************************************************************************/
2261
2262/**
2263 * struct iwl_powertable_cmd - Power Table Command
2264 * @flags: See below:
2265 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2267 *
2268 * PM allow:
2269 * bit 0 - '0' Driver not allow power management
2270 * '1' Driver allow PM (use rest of parameters)
2271 *
2272 * uCode send sleep notifications:
2273 * bit 1 - '0' Don't send sleep notification
2274 * '1' send sleep notification (SEND_PM_NOTIFICATION)
2275 *
2276 * Sleep over DTIM
 * bit 2 - '0' PM has to wake up every DTIM
 *         '1' PM may sleep over DTIM until the listen interval.
2279 *
2280 * PCI power managed
2281 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2282 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2283 *
2284 * Fast PD
2285 * bit 4 - '1' Put radio to sleep when receiving frame for others
2286 *
2287 * Force sleep Modes
2288 * bit 31/30- '00' use both mac/xtal sleeps
2289 * '01' force Mac sleep
2290 * '10' force xtal sleep
2291 * '11' Illegal set
2292 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2294 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM.
2296 */
#define IWL_POWER_VEC_SIZE 5

/* flag bits for iwl_powertable_cmd.flags; see kernel-doc comment above */
#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK	cpu_to_le16(BIT(0))
#define IWL_POWER_POWER_SAVE_ENA_MSK		cpu_to_le16(BIT(0))
#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK	cpu_to_le16(BIT(1))
#define IWL_POWER_SLEEP_OVER_DTIM_MSK		cpu_to_le16(BIT(2))
#define IWL_POWER_PCI_PM_MSK			cpu_to_le16(BIT(3))
#define IWL_POWER_FAST_PD			cpu_to_le16(BIT(4))
#define IWL_POWER_BEACON_FILTERING		cpu_to_le16(BIT(5))
#define IWL_POWER_SHADOW_REG_ENA		cpu_to_le16(BIT(6))
#define IWL_POWER_CT_KILL_SET			cpu_to_le16(BIT(7))

/* 3945 variant: no keep-alive fields */
struct iwl3945_powertable_cmd {
	__le16 flags;		/* IWL_POWER_* */
	u8 reserved[2];
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
} __packed;

struct iwl_powertable_cmd {
	__le16 flags;		/* IWL_POWER_* */
	u8 keep_alive_seconds;	/* 3945 reserved */
	u8 debug_flags;		/* 3945 reserved */
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
	__le32 keep_alive_beacons;
} __packed;

/*
 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
 * all devices identical.
 */
struct iwl_sleep_notification {
	u8 pm_sleep_mode;	/* see sleep-state enum below */
	u8 pm_wakeup_src;
	__le16 reserved;
	__le32 sleep_time;
	__le32 tsf_low;
	__le32 bcon_timer;
} __packed;

/* Sleep states. all devices identical. */
enum {
	IWL_PM_NO_SLEEP = 0,
	IWL_PM_SLP_MAC = 1,
	IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
	IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
	IWL_PM_SLP_PHY = 4,
	IWL_PM_SLP_REPENT = 5,
	IWL_PM_WAKEUP_BY_TIMER = 6,
	IWL_PM_WAKEUP_BY_DRIVER = 7,
	IWL_PM_WAKEUP_BY_RFKILL = 8,
	/* 3 reserved */
	IWL_PM_NUM_OF_MODES = 12,
};

/*
 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
 */
struct iwl_card_state_notif {
	__le32 flags;		/* *_CARD_DISABLED bits below */
} __packed;

#define HW_CARD_DISABLED   0x01
#define SW_CARD_DISABLED   0x02
#define CT_CARD_DISABLED   0x04
#define RXON_CARD_DISABLED 0x10

/* Critical-temperature thresholds for CT kill (cf. CT_CARD_DISABLED) */
struct iwl_ct_kill_config {
	__le32 reserved;
	__le32 critical_temperature_M;
	__le32 critical_temperature_R;
} __packed;
2372
2373/******************************************************************************
2374 * (8)
2375 * Scan Commands, Responses, Notifications:
2376 *
2377 *****************************************************************************/
2378
#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
#define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)

/**
 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
 *
 * One for each channel in the scan list.
 * Each channel can independently select:
 * 1)  SSID for directed active scans
 * 2)  Txpower setting (for rate specified within Tx command)
 * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
 *     quiet_plcp_th, good_CRC_th)
 *
 * To avoid uCode errors, make sure the following are true (see comments
 * under struct iwl_scan_cmd about max_out_time and quiet_time):
 * 1)  If using passive_dwell (i.e. passive_dwell != 0):
 *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
 * 2)  quiet_time <= active_dwell
 * 3)  If restricting off-channel time (i.e. max_out_time != 0):
 *     passive_dwell < max_out_time
 *     active_dwell < max_out_time
 */
struct iwl3945_scan_channel {
	/*
	 * type is defined as:
	 * 0:0 1 = active, 0 = passive
	 * 1:4 SSID direct bit map; if a bit is set, then corresponding
	 *     SSID IE is transmitted in probe request.
	 * 5:7 reserved
	 */
	u8 type;
	u8 channel;	/* band is selected by iwl3945_scan_cmd "flags" field */
	struct iwl3945_tx_power tpc;
	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
} __packed;

/* set number of direct probes u8 type */
#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))

struct iwl_scan_channel {
	/*
	 * type is defined as:
	 * 0:0 1 = active, 0 = passive
	 * 1:20 SSID direct bit map; if a bit is set, then corresponding
	 *      SSID IE is transmitted in probe request.
	 * 21:31 reserved
	 */
	__le32 type;
	__le16 channel;	/* band is selected by iwl_scan_cmd "flags" field */
	u8 tx_gain;		/* gain for analog radio */
	u8 dsp_atten;		/* gain for DSP */
	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
} __packed;

/* set number of direct probes __le32 type */
#define IWL_SCAN_PROBE_MASK(n)	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))

/**
 * struct iwl_ssid_ie - directed scan network information element
 *
 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
 * each channel may select different ssids from among the 20 (4) entries.
 * SSID IEs get transmitted in reverse order of entry.
 */
struct iwl_ssid_ie {
	u8 id;
	u8 len;
	u8 ssid[32];
} __packed;

#define PROBE_OPTION_MAX_3945		4
#define PROBE_OPTION_MAX		20
#define TX_CMD_LIFE_TIME_INFINITE	cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH_DISABLED	0
#define IWL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
#define IWL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)
#define IWL_MAX_SCAN_SIZE		1024
#define IWL_MAX_CMD_SIZE		4096
2460
2461/*
2462 * REPLY_SCAN_CMD = 0x80 (command)
2463 *
2464 * The hardware scan command is very powerful; the driver can set it up to
2465 * maintain (relatively) normal network traffic while doing a scan in the
2466 * background. The max_out_time and suspend_time control the ratio of how
2467 * long the device stays on an associated network channel ("service channel")
2468 * vs. how long it's away from the service channel, i.e. tuned to other channels
2469 * for scanning.
2470 *
2471 * max_out_time is the max time off-channel (in usec), and suspend_time
2472 * is how long (in "extended beacon" format) that the scan is "suspended"
2473 * after returning to the service channel. That is, suspend_time is the
2474 * time that we stay on the service channel, doing normal work, between
2475 * scan segments. The driver may set these parameters differently to support
2476 * scanning when associated vs. not associated, and light vs. heavy traffic
2477 * loads when associated.
2478 *
2479 * After receiving this command, the device's scan engine does the following;
2480 *
2481 * 1) Sends SCAN_START notification to driver
2482 * 2) Checks to see if it has time to do scan for one channel
2483 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2484 * to tell AP that we're going off-channel
2485 * 4) Tunes to first channel in scan list, does active or passive scan
2486 * 5) Sends SCAN_RESULT notification to driver
2487 * 6) Checks to see if it has time to do scan on *next* channel in list
2488 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2489 * before max_out_time expires
2490 * 8) Returns to service channel
2491 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2492 * 10) Stays on service channel until suspend_time expires
2493 * 11) Repeats entire process 2-10 until list is complete
2494 * 12) Sends SCAN_COMPLETE notification
2495 *
2496 * For fast, efficient scans, the scan command also has support for staying on
2497 * a channel for just a short time, if doing active scanning and getting no
2498 * responses to the transmitted probe request. This time is controlled by
2499 * quiet_time, and the number of received packets below which a channel is
2500 * considered "quiet" is controlled by quiet_plcp_threshold.
2501 *
2502 * For active scanning on channels that have regulatory restrictions against
2503 * blindly transmitting, the scan can listen before transmitting, to make sure
2504 * that there is already legitimate activity on the channel. If enough
2505 * packets are cleanly received on the channel (controlled by good_CRC_th,
2506 * typical value 1), the scan engine starts transmitting probe requests.
2507 *
2508 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2509 *
2510 * To avoid uCode errors, see timing restrictions described under
2511 * struct iwl_scan_channel.
2512 */
2513
/* REPLY_SCAN_CMD = 0x80 payload, 3945 layout (see comment above) */
struct iwl3945_scan_cmd {
	__le16 len;
	u8 reserved0;
	u8 channel_count;	/* # channels in channel list */
	__le16 quiet_time;	/* dwell only this # millisecs on quiet channel
				 * (only for active scan) */
	__le16 quiet_plcp_th;	/* quiet chnl is < this # pkts (typ. 1) */
	__le16 good_CRC_th;	/* passive -> active promotion threshold */
	__le16 reserved1;
	__le32 max_out_time;	/* max usec to be away from associated (service)
				 * channel */
	__le32 suspend_time;	/* pause scan this long (in "extended beacon
				 * format") when returning to service channel:
				 * 3945; 31:24 # beacons, 19:0 additional usec,
				 * 4965; 31:22 # beacons, 21:0 additional usec.
				 */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */

	/* For active scans (set to all-0s for passive scans).
	 * Does not include payload. Must specify Tx rate; no rate scaling. */
	struct iwl3945_tx_cmd tx_cmd;

	/* For directed active scans (set to all-0s otherwise) */
	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];

	/*
	 * Probe request frame, followed by channel list.
	 *
	 * Size of probe request frame is specified by byte count in tx_cmd.
	 * Channel list follows immediately after probe request frame.
	 * Number of channels in list is specified by channel_count.
	 * Each channel in list is of type:
	 *
	 * struct iwl3945_scan_channel channels[0];
	 *
	 * NOTE: Only one band of channels can be scanned per pass. You
	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
	 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
	 * before requesting another scan.
	 */
	u8 data[0];
} __packed;
2557
/* REPLY_SCAN_CMD = 0x80 payload, non-3945 layout (see comment above) */
struct iwl_scan_cmd {
	__le16 len;
	u8 reserved0;
	u8 channel_count;	/* # channels in channel list */
	__le16 quiet_time;	/* dwell only this # millisecs on quiet channel
				 * (only for active scan) */
	__le16 quiet_plcp_th;	/* quiet chnl is < this # pkts (typ. 1) */
	__le16 good_CRC_th;	/* passive -> active promotion threshold */
	__le16 rx_chain;	/* RXON_RX_CHAIN_* */
	__le32 max_out_time;	/* max usec to be away from associated (service)
				 * channel */
	__le32 suspend_time;	/* pause scan this long (in "extended beacon
				 * format") when returning to service chnl:
				 * 3945; 31:24 # beacons, 19:0 additional usec,
				 * 4965; 31:22 # beacons, 21:0 additional usec.
				 */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */

	/* For active scans (set to all-0s for passive scans).
	 * Does not include payload. Must specify Tx rate; no rate scaling. */
	struct iwl_tx_cmd tx_cmd;

	/* For directed active scans (set to all-0s otherwise) */
	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];

	/*
	 * Probe request frame, followed by channel list.
	 *
	 * Size of probe request frame is specified by byte count in tx_cmd.
	 * Channel list follows immediately after probe request frame.
	 * Number of channels in list is specified by channel_count.
	 * Each channel in list is of type:
	 *
	 * struct iwl_scan_channel channels[0];
	 *
	 * NOTE: Only one band of channels can be scanned per pass. You
	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
	 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
	 * before requesting another scan.
	 */
	u8 data[0];
} __packed;
2601
/* Can abort will notify by complete notification with abort status. */
#define CAN_ABORT_STATUS	cpu_to_le32(0x1)
/* complete notification statuses */
#define ABORT_STATUS		0x2

/*
 * REPLY_SCAN_CMD = 0x80 (response)
 */
struct iwl_scanreq_notification {
	__le32 status;		/* 1: okay, 2: cannot fulfill request */
} __packed;

/*
 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
 */
struct iwl_scanstart_notification {
	__le32 tsf_low;
	__le32 tsf_high;
	__le32 beacon_timer;
	u8 channel;
	u8 band;
	u8 reserved[2];
	__le32 status;
} __packed;
2626
/*
 * Scan-engine owner status values. No trailing semicolons: the macros
 * must remain usable inside expressions (a semicolon in the expansion
 * would break e.g. (SCAN_OWNER_STATUS | MEASURE_OWNER_STATUS)).
 */
#define SCAN_OWNER_STATUS 0x1
#define MEASURE_OWNER_STATUS 0x2
2629
#define IWL_PROBE_STATUS_OK		0
#define IWL_PROBE_STATUS_TX_FAILED	BIT(0)
/* error statuses combined with TX_FAILED */
#define IWL_PROBE_STATUS_FAIL_TTL	BIT(1)
#define IWL_PROBE_STATUS_FAIL_BT	BIT(2)

#define NUMBER_OF_STATISTICS 1	/* first __le32 is good CRC */
/*
 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
 */
struct iwl_scanresults_notification {
	u8 channel;
	u8 band;
	u8 probe_status;	/* IWL_PROBE_STATUS_* */
	u8 num_probe_not_sent;	/* not enough time to send */
	__le32 tsf_low;
	__le32 tsf_high;
	__le32 statistics[NUMBER_OF_STATISTICS];
} __packed;

/*
 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
 */
struct iwl_scancomplete_notification {
	u8 scanned_channels;
	u8 status;
	u8 last_channel;
	__le32 tsf_low;
	__le32 tsf_high;
} __packed;
2660
2661
2662/******************************************************************************
2663 * (9)
2664 * IBSS/AP Commands and Notifications:
2665 *
2666 *****************************************************************************/
2667
enum iwl_ibss_manager {
	IWL_NOT_IBSS_MANAGER = 0,
	IWL_IBSS_MANAGER = 1,
};

/*
 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
 */

struct iwl3945_beacon_notif {
	struct iwl3945_tx_resp beacon_notify_hdr;
	__le32 low_tsf;
	__le32 high_tsf;
	__le32 ibss_mgr_status;	/* see enum iwl_ibss_manager */
} __packed;

struct iwl4965_beacon_notif {
	struct iwl4965_tx_resp beacon_notify_hdr;
	__le32 low_tsf;
	__le32 high_tsf;
	__le32 ibss_mgr_status;	/* see enum iwl_ibss_manager */
} __packed;

/*
 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
 */

struct iwl3945_tx_beacon_cmd {
	struct iwl3945_tx_cmd tx;
	__le16 tim_idx;
	u8 tim_size;
	u8 reserved1;
	struct ieee80211_hdr frame[0];	/* beacon frame */
} __packed;

struct iwl_tx_beacon_cmd {
	struct iwl_tx_cmd tx;
	__le16 tim_idx;
	u8 tim_size;
	u8 reserved1;
	struct ieee80211_hdr frame[0];	/* beacon frame */
} __packed;
2710
2711/******************************************************************************
2712 * (10)
2713 * Statistics Commands and Notifications:
2714 *
2715 *****************************************************************************/
2716
#define IWL_TEMP_CONVERT 260

#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12

/* Used for passing to driver number of successes and failures per rate */
struct rate_histogram {
	/* one counter per supported rate; a/b/g arms of the union
	 * correspond to the 802.11a/b/g rate sets */
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} success;
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} failed;
} __packed;
2736
2737/* statistics command response */
2738
/* 3945 Rx PHY counters (see struct statistics_rx_phy for later devices) */
struct iwl39_statistics_rx_phy {
	__le32 ina_cnt;
	__le32 fina_cnt;
	__le32 plcp_err;
	__le32 crc32_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 false_alarm_cnt;
	__le32 fina_sync_err_cnt;
	__le32 sfd_timeout;
	__le32 fina_timeout;
	__le32 unresponded_rts;
	__le32 rxe_frame_limit_overrun;
	__le32 sent_ack_cnt;
	__le32 sent_cts_cnt;
} __packed;

struct iwl39_statistics_rx_non_phy {
	__le32 bogus_cts;	/* CTS received when not expecting CTS */
	__le32 bogus_ack;	/* ACK received when not expecting ACK */
	__le32 non_bssid_frames;	/* number of frames with BSSID that
					 * doesn't belong to the STA BSSID */
	__le32 filtered_frames;	/* count frames that were dumped in the
				 * filtering process */
	__le32 non_channel_beacons;	/* beacons with our bss id but not on
					 * our serving channel */
} __packed;

struct iwl39_statistics_rx {
	struct iwl39_statistics_rx_phy ofdm;
	struct iwl39_statistics_rx_phy cck;
	struct iwl39_statistics_rx_non_phy general;
} __packed;

struct iwl39_statistics_tx {
	__le32 preamble_cnt;
	__le32 rx_detected_cnt;
	__le32 bt_prio_defer_cnt;
	__le32 bt_prio_kill_cnt;
	__le32 few_bytes_cnt;
	__le32 cts_timeout;
	__le32 ack_timeout;
	__le32 expected_ack_cnt;
	__le32 actual_ack_cnt;
} __packed;

/* debug counters shared by 3945 and later statistics formats */
struct statistics_dbg {
	__le32 burst_check;
	__le32 burst_count;
	__le32 wait_for_silence_timeout_cnt;
	__le32 reserved[3];
} __packed;

/* per-antenna (A/B) Tx usage counters, 3945 */
struct iwl39_statistics_div {
	__le32 tx_on_a;
	__le32 tx_on_b;
	__le32 exec_time;
	__le32 probe_time;
} __packed;

struct iwl39_statistics_general {
	__le32 temperature;
	struct statistics_dbg dbg;
	__le32 sleep_time;
	__le32 slots_out;
	__le32 slots_idle;
	__le32 ttl_timestamp;
	struct iwl39_statistics_div div;
} __packed;
2809
struct statistics_rx_phy {
	__le32 ina_cnt;
	__le32 fina_cnt;
	__le32 plcp_err;
	__le32 crc32_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 false_alarm_cnt;
	__le32 fina_sync_err_cnt;
	__le32 sfd_timeout;
	__le32 fina_timeout;
	__le32 unresponded_rts;
	__le32 rxe_frame_limit_overrun;
	__le32 sent_ack_cnt;
	__le32 sent_cts_cnt;
	__le32 sent_ba_rsp_cnt;
	__le32 dsp_self_kill;
	__le32 mh_format_err;
	__le32 re_acq_main_rssi_sum;
	__le32 reserved3;
} __packed;

/* HT-rate (802.11n) Rx PHY counters */
struct statistics_rx_ht_phy {
	__le32 plcp_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 crc32_err;
	__le32 mh_format_err;
	__le32 agg_crc32_good;
	__le32 agg_mpdu_cnt;
	__le32 agg_cnt;
	__le32 unsupport_mcs;
} __packed;

#define INTERFERENCE_DATA_AVAILABLE	cpu_to_le32(1)

struct statistics_rx_non_phy {
	__le32 bogus_cts;	/* CTS received when not expecting CTS */
	__le32 bogus_ack;	/* ACK received when not expecting ACK */
	__le32 non_bssid_frames;	/* number of frames with BSSID that
					 * doesn't belong to the STA BSSID */
	__le32 filtered_frames;	/* count frames that were dumped in the
				 * filtering process */
	__le32 non_channel_beacons;	/* beacons with our bss id but not on
					 * our serving channel */
	__le32 channel_beacons;	/* beacons with our bss id and in our
				 * serving channel */
	__le32 num_missed_bcon;	/* number of missed beacons */
	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
					 * ADC was in saturation */
	__le32 ina_detection_search_time;/* total time (in 0.8us) searched
					  * for INA */
	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
	__le32 interference_data_flag;	/* flag for interference data
					 * availability. 1 when data is
					 * available. */
	__le32 channel_load;		/* counts RX Enable time in uSec */
	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
					 * and CCK) counter */
	__le32 beacon_rssi_a;
	__le32 beacon_rssi_b;
	__le32 beacon_rssi_c;
	__le32 beacon_energy_a;
	__le32 beacon_energy_b;
	__le32 beacon_energy_c;
} __packed;

struct statistics_rx {
	struct statistics_rx_phy ofdm;
	struct statistics_rx_phy cck;
	struct statistics_rx_non_phy general;
	struct statistics_rx_ht_phy ofdm_ht;
} __packed;
2887
/**
 * struct statistics_tx_power - current tx power
 *
 * @ant_a: current tx power on chain a in 1/2 dB step
 * @ant_b: current tx power on chain b in 1/2 dB step
 * @ant_c: current tx power on chain c in 1/2 dB step
 */
struct statistics_tx_power {
	u8 ant_a;
	u8 ant_b;
	u8 ant_c;
	u8 reserved;
} __packed;

/* Tx aggregation counters (ba_* = block-ack, scd_* = scheduler) */
struct statistics_tx_non_phy_agg {
	__le32 ba_timeout;
	__le32 ba_reschedule_frames;
	__le32 scd_query_agg_frame_cnt;
	__le32 scd_query_no_agg;
	__le32 scd_query_agg;
	__le32 scd_query_mismatch;
	__le32 frame_not_ready;
	__le32 underrun;
	__le32 bt_prio_kill;
	__le32 rx_ba_rsp_cnt;
} __packed;

struct statistics_tx {
	__le32 preamble_cnt;
	__le32 rx_detected_cnt;
	__le32 bt_prio_defer_cnt;
	__le32 bt_prio_kill_cnt;
	__le32 few_bytes_cnt;
	__le32 cts_timeout;
	__le32 ack_timeout;
	__le32 expected_ack_cnt;
	__le32 actual_ack_cnt;
	__le32 dump_msdu_cnt;
	__le32 burst_abort_next_frame_mismatch_cnt;
	__le32 burst_abort_missing_next_frame_cnt;
	__le32 cts_timeout_collision;
	__le32 ack_or_ba_timeout_collision;
	struct statistics_tx_non_phy_agg agg;

	__le32 reserved1;
} __packed;
2934
2935
/*
 * Antenna-diversity statistics.
 * NOTE(review): tx_on_a/tx_on_b presumably count time (or events) spent
 * transmitting on each antenna — confirm units against the firmware spec.
 */
struct statistics_div {
	__le32 tx_on_a;
	__le32 tx_on_b;
	__le32 exec_time;
	__le32 probe_time;
	__le32 reserved1;
	__le32 reserved2;
} __packed;
2944
/* "General" section of the statistics notification: temperature, power
 * timers, diversity and Rx-enable counters. */
struct statistics_general_common {
	__le32 temperature;	/* radio temperature */
	struct statistics_dbg dbg;	/* debug counters */
	__le32 sleep_time;
	__le32 slots_out;
	__le32 slots_idle;
	__le32 ttl_timestamp;
	struct statistics_div div;	/* antenna diversity */
	__le32 rx_enable_counter;
	/*
	 * num_of_sos_states:
	 *  count the number of times we have to re-tune
	 *  in order to get out of bad PHY status
	 */
	__le32 num_of_sos_states;
} __packed;
2961
/* Wrapper around the common general section, padded with reserved words. */
struct statistics_general {
	struct statistics_general_common common;
	__le32 reserved2;
	__le32 reserved3;
} __packed;
2967
2968#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
2969#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
2970#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
2971
2972/*
2973 * REPLY_STATISTICS_CMD = 0x9c,
2974 * all devices identical.
2975 *
2976 * This command triggers an immediate response containing uCode statistics.
2977 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
2978 *
2979 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2980 * internal copy of the statistics (counters) after issuing the response.
2981 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
2982 *
2983 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2984 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2985 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2986 */
2987#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2988#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
2989struct iwl_statistics_cmd {
2990 __le32 configuration_flags; /* IWL_STATS_CONF_* */
2991} __packed;
2992
2993/*
2994 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2995 *
2996 * By default, uCode issues this notification after receiving a beacon
2997 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2998 * REPLY_STATISTICS_CMD 0x9c, above.
2999 *
3000 * Statistics counters continue to increment beacon after beacon, but are
3001 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
3002 * 0x9c with CLEAR_STATS bit set (see above).
3003 *
3004 * uCode also issues this notification during scans. uCode clears statistics
3005 * appropriately so that each notification contains statistics for only the
3006 * one channel that has just been scanned.
3007 */
3008#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3009#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3010
/* STATISTICS_NOTIFICATION 0x9d payload, 3945 layout */
struct iwl3945_notif_statistics {
	__le32 flag;	/* STATISTICS_REPLY_FLG_* */
	struct iwl39_statistics_rx rx;
	struct iwl39_statistics_tx tx;
	struct iwl39_statistics_general general;
} __packed;
3017
/* STATISTICS_NOTIFICATION 0x9d payload, 4965-and-later layout */
struct iwl_notif_statistics {
	__le32 flag;	/* STATISTICS_REPLY_FLG_* */
	struct statistics_rx rx;
	struct statistics_tx tx;
	struct statistics_general general;
} __packed;
3024
3025/*
3026 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
3027 *
 * uCode sends a MISSED_BEACONS_NOTIFICATION to the driver whenever it
 * detects missed beacons, regardless of how many were missed.  From the
 * notification the driver can extract all of the beacon information: the
 * total number of missed beacons, the number of consecutive missed
 * beacons, the number of beacons received, and the number of beacons
 * expected.
3034 *
3035 * If uCode detected consecutive_missed_beacons > 5, it will reset the radio
3036 * in order to bring the radio/PHY back to working state; which has no relation
3037 * to when driver will perform sensitivity calibration.
3038 *
 * The driver should set its own missed_beacon_threshold to decide when to
 * perform sensitivity calibration based on the number of consecutive missed
 * beacons, in order to improve overall performance, especially in noisy
 * environments.
3042 *
3043 */
3044
3045#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
3046#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
3047#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
3048
/*
 * MISSED_BEACONS_NOTIFICATION = 0xa2 payload.
 * NOTE: "total_missed_becons" (sic) is the historical field name used by
 * the driver; the typo is kept for source compatibility with callers.
 */
struct iwl_missed_beacon_notif {
	__le32 consecutive_missed_beacons;
	__le32 total_missed_becons;
	__le32 num_expected_beacons;
	__le32 num_recvd_beacons;
} __packed;
3055
3056
3057/******************************************************************************
3058 * (11)
3059 * Rx Calibration Commands:
3060 *
3061 * With the uCode used for open source drivers, most Tx calibration (except
3062 * for Tx Power) and most Rx calibration is done by uCode during the
3063 * "initialize" phase of uCode boot. Driver must calibrate only:
3064 *
3065 * 1) Tx power (depends on temperature), described elsewhere
3066 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
3067 * 3) Receiver sensitivity (to optimize signal detection)
3068 *
3069 *****************************************************************************/
3070
3071/**
3072 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
3073 *
3074 * This command sets up the Rx signal detector for a sensitivity level that
3075 * is high enough to lock onto all signals within the associated network,
3076 * but low enough to ignore signals that are below a certain threshold, so as
3077 * not to have too many "false alarms". False alarms are signals that the
3078 * Rx DSP tries to lock onto, but then discards after determining that they
3079 * are noise.
3080 *
3081 * The optimum number of false alarms is between 5 and 50 per 200 TUs
3082 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
3083 * time listening, not transmitting). Driver must adjust sensitivity so that
3084 * the ratio of actual false alarms to actual Rx time falls within this range.
3085 *
3086 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
3087 * received beacon. These provide information to the driver to analyze the
3088 * sensitivity. Don't analyze statistics that come in from scanning, or any
3089 * other non-associated-network source. Pertinent statistics include:
3090 *
3091 * From "general" statistics (struct statistics_rx_non_phy):
3092 *
3093 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3094 * Measure of energy of desired signal. Used for establishing a level
3095 * below which the device does not detect signals.
3096 *
3097 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
3098 * Measure of background noise in silent period after beacon.
3099 *
3100 * channel_load
3101 * uSecs of actual Rx time during beacon period (varies according to
3102 * how much time was spent transmitting).
3103 *
3104 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
3105 *
3106 * false_alarm_cnt
3107 * Signal locks abandoned early (before phy-level header).
3108 *
3109 * plcp_err
3110 * Signal locks abandoned late (during phy-level header).
3111 *
3112 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
3113 * beacon to beacon, i.e. each value is an accumulation of all errors
3114 * before and including the latest beacon. Values will wrap around to 0
3115 * after counting up to 2^32 - 1. Driver must differentiate vs.
3116 * previous beacon's values to determine # false alarms in the current
3117 * beacon period.
3118 *
3119 * Total number of false alarms = false_alarms + plcp_errs
3120 *
3121 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
3122 * (notice that the start points for OFDM are at or close to settings for
3123 * maximum sensitivity):
3124 *
3125 * START / MIN / MAX
3126 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
3127 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
3128 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
3129 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
3130 *
3131 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3132 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
3133 * by *adding* 1 to all 4 of the table entries above, up to the max for
3134 * each entry. Conversely, if false alarm rate is too low (less than 5
3135 * for each 204.8 msecs listening), *subtract* 1 from each entry to
3136 * increase sensitivity.
3137 *
3138 * For CCK sensitivity, keep track of the following:
3139 *
3140 * 1). 20-beacon history of maximum background noise, indicated by
3141 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
3142 * 3 receivers. For any given beacon, the "silence reference" is
3143 * the maximum of last 60 samples (20 beacons * 3 receivers).
3144 *
3145 * 2). 10-beacon history of strongest signal level, as indicated
3146 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
3147 * i.e. the strength of the signal through the best receiver at the
3148 * moment. These measurements are "upside down", with lower values
3149 * for stronger signals, so max energy will be *minimum* value.
3150 *
3151 * Then for any given beacon, the driver must determine the *weakest*
3152 * of the strongest signals; this is the minimum level that needs to be
3153 * successfully detected, when using the best receiver at the moment.
3154 * "Max cck energy" is the maximum (higher value means lower energy!)
3155 * of the last 10 minima. Once this is determined, driver must add
3156 * a little margin by adding "6" to it.
3157 *
3158 * 3). Number of consecutive beacon periods with too few false alarms.
3159 * Reset this to 0 at the first beacon period that falls within the
3160 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3161 *
3162 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
3163 * (notice that the start points for CCK are at maximum sensitivity):
3164 *
3165 * START / MIN / MAX
3166 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
3167 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
3168 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
3169 *
3170 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3171 * (greater than 50 for each 204.8 msecs listening), method for reducing
3172 * sensitivity is:
3173 *
3174 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3175 * up to max 400.
3176 *
3177 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
3178 * sensitivity has been reduced a significant amount; bring it up to
3179 * a moderate 161. Otherwise, *add* 3, up to max 200.
3180 *
3181 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
3182 * sensitivity has been reduced only a moderate or small amount;
3183 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
3184 * down to min 0. Otherwise (if gain has been significantly reduced),
3185 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
3186 *
3187 * b) Save a snapshot of the "silence reference".
3188 *
3189 * If actual rate of CCK false alarms (+ plcp_errors) is too low
3190 * (less than 5 for each 204.8 msecs listening), method for increasing
3191 * sensitivity is used only if:
3192 *
3193 * 1a) Previous beacon did not have too many false alarms
3194 * 1b) AND difference between previous "silence reference" and current
3195 * "silence reference" (prev - current) is 2 or more,
3196 * OR 2) 100 or more consecutive beacon periods have had rate of
3197 * less than 5 false alarms per 204.8 milliseconds rx time.
3198 *
3199 * Method for increasing sensitivity:
3200 *
3201 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
3202 * down to min 125.
3203 *
3204 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3205 * down to min 200.
3206 *
3207 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
3208 *
3209 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3210 * (between 5 and 50 for each 204.8 msecs listening):
3211 *
3212 * 1) Save a snapshot of the silence reference.
3213 *
3214 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3215 * give some extra margin to energy threshold by *subtracting* 8
3216 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
3217 *
3218 * For all cases (too few, too many, good range), make sure that the CCK
3219 * detection threshold (energy) is below the energy level for robust
3220 * detection over the past 10 beacon periods, the "Max cck energy".
3221 * Lower values mean higher energy; this means making sure that the value
3222 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3223 *
3224 */
3225
3226/*
3227 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3228 */
3229#define HD_TABLE_SIZE (11) /* number of entries */
3230#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3231#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3232#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3233#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3234#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3235#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3236#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3237#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3238#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3239#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3240#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3241
3242/* Control field in struct iwl_sensitivity_cmd */
3243#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3244#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
3245
/**
 * struct iwl_sensitivity_cmd
 * @control: (1) updates working table, (0) updates default table
 * @table: energy threshold values, use HD_* as index into table
 *
 * Always use "1" in "control" to update uCode's working table and DSP.
 * See the SENSITIVITY_CMD 0xa8 description above for the tuning algorithm.
 */
struct iwl_sensitivity_cmd {
	__le16 control;			/* always use "1" (work table) */
	__le16 table[HD_TABLE_SIZE];	/* use HD_* as index */
} __packed;
3257
3258
3259/**
3260 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3261 *
3262 * This command sets the relative gains of 4965 device's 3 radio receiver chains.
3263 *
3264 * After the first association, driver should accumulate signal and noise
3265 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3266 * beacons from the associated network (don't collect statistics that come
3267 * in from scanning, or any other non-network source).
3268 *
3269 * DISCONNECTED ANTENNA:
3270 *
3271 * Driver should determine which antennas are actually connected, by comparing
3272 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3273 * following values over 20 beacons, one accumulator for each of the chains
3274 * a/b/c, from struct statistics_rx_non_phy:
3275 *
3276 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3277 *
3278 * Find the strongest signal from among a/b/c. Compare the other two to the
3279 * strongest. If any signal is more than 15 dB (times 20, unless you
3280 * divide the accumulated values by 20) below the strongest, the driver
3281 * considers that antenna to be disconnected, and should not try to use that
3282 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3283 * driver should declare the stronger one as connected, and attempt to use it
3284 * (A and B are the only 2 Tx chains!).
3285 *
3286 *
3287 * RX BALANCE:
3288 *
3289 * Driver should balance the 3 receivers (but just the ones that are connected
3290 * to antennas, see above) for gain, by comparing the average signal levels
3291 * detected during the silence after each beacon (background noise).
3292 * Accumulate (add) the following values over 20 beacons, one accumulator for
3293 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3294 *
3295 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3296 *
3297 * Find the weakest background noise level from among a/b/c. This Rx chain
3298 * will be the reference, with 0 gain adjustment. Attenuate other channels by
3299 * finding noise difference:
3300 *
3301 * (accum_noise[i] - accum_noise[reference]) / 30
3302 *
3303 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3304 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3305 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3306 * and set bit 2 to indicate "reduce gain". The value for the reference
3307 * (weakest) chain should be "0".
3308 *
3309 * diff_gain_[abc] bit fields:
3310 * 2: (1) reduce gain, (0) increase gain
3311 * 1-0: amount of gain, units of 1.5 dB
3312 */
3313
3314/* Phy calibration command for series */
3315/* The default calibrate table size if not specified by firmware */
3316#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3317enum {
3318 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3319 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3320};
3321
3322#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3323
/* Common header for phy-calibration commands (REPLY_PHY_CALIBRATION_CMD 0xb0) */
struct iwl_calib_hdr {
	u8 op_code;	/* IWL_PHY_CALIBRATE_* sub-command id */
	u8 first_group;
	u8 groups_num;
	u8 data_valid;
} __packed;
3330
/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7)
 * diff_gain_[abc] bit fields: bit 2 = reduce gain, bits 1-0 = amount in
 * 1.5 dB units — see the RX BALANCE description above. */
struct iwl_calib_diff_gain_cmd {
	struct iwl_calib_hdr hdr;
	s8 diff_gain_a;	/* see above */
	s8 diff_gain_b;
	s8 diff_gain_c;
	u8 reserved1;
} __packed;
3339
3340/******************************************************************************
3341 * (12)
3342 * Miscellaneous Commands:
3343 *
3344 *****************************************************************************/
3345
3346/*
3347 * LEDs Command & Response
3348 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
3349 *
3350 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3351 * this command turns it on or off, or sets up a periodic blinking cycle.
3352 */
/* Body of REPLY_LEDS_CMD 0x48; see the command description above. */
struct iwl_led_cmd {
	__le32 interval;	/* "interval" in uSec */
	u8 id;		/* 1: Activity, 2: Link, 3: Tech */
	u8 off;		/* # intervals off while blinking;
			 * "0", with >0 "on" value, turns LED on */
	u8 on;			/* # intervals on while blinking;
			 * "0", regardless of "off", turns LED off */
	u8 reserved;	/* padding to a 4-byte boundary */
} __packed;
3362
3363
3364/******************************************************************************
3365 * (13)
3366 * Union of all expected notifications/responses:
3367 *
3368 *****************************************************************************/
3369
/*
 * Generic Rx frame: every response/notification the device delivers.
 * Which union member is valid depends on the command/notification id
 * carried in @hdr (NOTE(review): confirm the exact selector field name
 * in struct iwl_cmd_header, defined elsewhere).
 */
struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	union {
		struct iwl3945_rx_frame rx_frame;
		struct iwl3945_tx_resp tx_resp;
		struct iwl3945_beacon_notif beacon_status;

		struct iwl_alive_resp alive_frame;
		struct iwl_spectrum_notification spectrum_notif;
		struct iwl_csa_notification csa_notif;
		struct iwl_error_resp err_resp;
		struct iwl_card_state_notif card_state_notif;
		struct iwl_add_sta_resp add_sta;
		struct iwl_rem_sta_resp rem_sta;
		struct iwl_sleep_notification sleep_notif;
		struct iwl_spectrum_resp spectrum;
		struct iwl_notif_statistics stats;
		struct iwl_compressed_ba_resp compressed_ba;
		struct iwl_missed_beacon_notif missed_beacon;
		__le32 status;
		u8 raw[0];	/* raw payload escape hatch */
	} u;
} __packed;
3404
3405#endif /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644
index 000000000000..d418b647be80
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -0,0 +1,2674 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * set bt_coex_active to false, uCode will ignore the BT activity and
56 * perform the normal operation
57 *
58 * User might experience transmit issue on some platform due to WiFi/BT
59 * co-exist problem. The possible behaviors are:
60 * Able to scan and finding all the available AP
61 * Not able to associate with any AP
62 * On those platforms, WiFi communication can be restored by set
63 * "bt_coex_active" module parameter to "false"
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
67static bool bt_coex_active = true;
68module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70
71u32 iwlegacy_debug_level;
72EXPORT_SYMBOL(iwlegacy_debug_level);
73
74const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
75EXPORT_SYMBOL(iwlegacy_bcast_addr);
76
77
78/* This function both allocates and initializes hw and priv. */
79struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
80{
81 struct iwl_priv *priv;
82 /* mac80211 allocates memory for this device instance, including
83 * space for this driver's private structure */
84 struct ieee80211_hw *hw;
85
86 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
87 cfg->ops->ieee80211_ops);
88 if (hw == NULL) {
89 pr_err("%s: Can not allocate network device\n",
90 cfg->name);
91 goto out;
92 }
93
94 priv = hw->priv;
95 priv->hw = hw;
96
97out:
98 return hw;
99}
100EXPORT_SYMBOL(iwl_legacy_alloc_all);
101
102#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
103#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
104static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
105 struct ieee80211_sta_ht_cap *ht_info,
106 enum ieee80211_band band)
107{
108 u16 max_bit_rate = 0;
109 u8 rx_chains_num = priv->hw_params.rx_chains_num;
110 u8 tx_chains_num = priv->hw_params.tx_chains_num;
111
112 ht_info->cap = 0;
113 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
114
115 ht_info->ht_supported = true;
116
117 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
118 max_bit_rate = MAX_BIT_RATE_20_MHZ;
119 if (priv->hw_params.ht40_channel & BIT(band)) {
120 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
121 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
122 ht_info->mcs.rx_mask[4] = 0x01;
123 max_bit_rate = MAX_BIT_RATE_40_MHZ;
124 }
125
126 if (priv->cfg->mod_params->amsdu_size_8K)
127 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
128
129 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
130 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
131
132 ht_info->mcs.rx_mask[0] = 0xFF;
133 if (rx_chains_num >= 2)
134 ht_info->mcs.rx_mask[1] = 0xFF;
135 if (rx_chains_num >= 3)
136 ht_info->mcs.rx_mask[2] = 0xFF;
137
138 /* Highest supported Rx data rate */
139 max_bit_rate *= rx_chains_num;
140 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
141 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
142
143 /* Tx MCS capabilities */
144 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
145 if (tx_chains_num != rx_chains_num) {
146 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
147 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
148 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
149 }
150}
151
/**
 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom
 *
 * Builds the 2.4 GHz and 5.2 GHz ieee80211_supported_band tables (channels
 * and bitrates) from the driver's EEPROM-derived channel list, then sets
 * STATUS_GEO_CONFIGURED.  Returns 0 on success or -ENOMEM.  Allocations are
 * undone by iwl_legacy_free_geos().
 */
int iwl_legacy_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	/* Already initialized (e.g. on restart): nothing to rebuild. */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	/* One flat channel array shared by both bands (split below). */
	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	/* Populate per-channel entries from the EEPROM channel table. */
	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		if (!iwl_legacy_is_channel_valid(ch))
			continue;

		if (iwl_legacy_is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (iwl_legacy_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* Track the highest regulatory power seen so far. */
			if (ch->max_power_avg > priv->tx_power_device_lmt)
				priv->tx_power_device_lmt = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				iwl_legacy_is_channel_a_band(ch) ?  "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				 geo_ch->flags);
	}

	/*
	 * A-capable SKU but no 5 GHz channels in EEPROM: demote to BG.
	 * NOTE(review): this writes through priv->cfg, which may be shared.
	 */
	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			priv->pci_dev->device,
			priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_geos);
270
271/*
272 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
273 */
274void iwl_legacy_free_geos(struct iwl_priv *priv)
275{
276 kfree(priv->ieee_channels);
277 kfree(priv->ieee_rates);
278 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
279}
280EXPORT_SYMBOL(iwl_legacy_free_geos);
281
282static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
283 enum ieee80211_band band,
284 u16 channel, u8 extension_chan_offset)
285{
286 const struct iwl_channel_info *ch_info;
287
288 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
289 if (!iwl_legacy_is_channel_valid(ch_info))
290 return false;
291
292 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
293 return !(ch_info->ht40_extension_channel &
294 IEEE80211_CHAN_NO_HT40PLUS);
295 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
296 return !(ch_info->ht40_extension_channel &
297 IEEE80211_CHAN_NO_HT40MINUS);
298
299 return false;
300}
301
302bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 struct ieee80211_sta_ht_cap *ht_cap)
305{
306 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
307 return false;
308
309 /*
310 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
311 * the bit will not set if it is pure 40MHz case
312 */
313 if (ht_cap && !ht_cap->ht_supported)
314 return false;
315
316#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
317 if (priv->disable_ht40)
318 return false;
319#endif
320
321 return iwl_legacy_is_channel_extension(priv, priv->band,
322 le16_to_cpu(ctx->staging.channel),
323 ctx->ht.extension_chan_offset);
324}
325EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
326
327static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
328{
329 u16 new_val;
330 u16 beacon_factor;
331
332 /*
333 * If mac80211 hasn't given us a beacon interval, program
334 * the default into the device.
335 */
336 if (!beacon_val)
337 return DEFAULT_BEACON_INTERVAL;
338
339 /*
340 * If the beacon interval we obtained from the peer
341 * is too large, we'll have to wake up more often
342 * (and in IBSS case, we'll beacon too much)
343 *
344 * For example, if max_beacon_val is 4096, and the
345 * requested beacon interval is 7000, we'll have to
346 * use 3500 to be able to wake up on the beacons.
347 *
348 * This could badly influence beacon detection stats.
349 */
350
351 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
352 new_val = beacon_val / beacon_factor;
353
354 if (!new_val)
355 new_val = max_beacon_val;
356
357 return new_val;
358}
359
/*
 * iwl_legacy_send_rxon_timing - program beacon timing for an RXON context
 *
 * Fills ctx->timing (TSF timestamp, listen interval, beacon interval clamped
 * to the hardware maximum, initial beacon countdown, DTIM period) and sends
 * it to the device via the context's rxon_timing_cmd.  Caller must hold
 * priv->mutex.  Returns the result of iwl_legacy_send_cmd_pdu().
 */
int
iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	/* No vif yet means no BSS beacon interval; 0 selects the default. */
	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	/* Offset of the next beacon within the interval, from current TSF. */
	tsf = priv->timestamp;	/* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* A DTIM period of 0 from mac80211 is programmed as 1. */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				sizeof(ctx->timing), &ctx->timing);
}
EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
407
408void
409iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
410 struct iwl_rxon_context *ctx,
411 int hw_decrypt)
412{
413 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
414
415 if (hw_decrypt)
416 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
417 else
418 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
419
420}
421EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
422
423/* validate RXON structure is valid */
424int
425iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
426{
427 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
428 bool error = false;
429
430 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
431 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
432 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
433 error = true;
434 }
435 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
436 IWL_WARN(priv, "check 2.4G: wrong radar\n");
437 error = true;
438 }
439 } else {
440 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
441 IWL_WARN(priv, "check 5.2G: not short slot!\n");
442 error = true;
443 }
444 if (rxon->flags & RXON_FLG_CCK_MSK) {
445 IWL_WARN(priv, "check 5.2G: CCK!\n");
446 error = true;
447 }
448 }
449 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
450 IWL_WARN(priv, "mac/bssid mcast!\n");
451 error = true;
452 }
453
454 /* make sure basic rates 6Mbps and 1Mbps are supported */
455 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
456 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
457 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
458 error = true;
459 }
460
461 if (le16_to_cpu(rxon->assoc_id) > 2007) {
462 IWL_WARN(priv, "aid > 2007\n");
463 error = true;
464 }
465
466 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
467 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
468 IWL_WARN(priv, "CCK and short slot\n");
469 error = true;
470 }
471
472 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
473 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
474 IWL_WARN(priv, "CCK and auto detect");
475 error = true;
476 }
477
478 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
479 RXON_FLG_TGG_PROTECT_MSK)) ==
480 RXON_FLG_TGG_PROTECT_MSK) {
481 IWL_WARN(priv, "TGg but no auto-detect\n");
482 error = true;
483 }
484
485 if (error)
486 IWL_WARN(priv, "Tuning to channel %d\n",
487 le16_to_cpu(rxon->channel));
488
489 if (error) {
490 IWL_ERR(priv, "Invalid RXON\n");
491 return -EINVAL;
492 }
493 return 0;
494}
495EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
496
497/**
498 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
499 * @priv: staging_rxon is compared to active_rxon
500 *
501 * If the RXON structure is changing enough to require a new tune,
502 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
503 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
504 */
int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx)
{
	const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
	const struct iwl_legacy_rxon_cmd *active = &ctx->active;

/* CHK: return 1 (full RXON needed) when the condition holds, with a log */
#define CHK(cond) \
	if ((cond)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
		return 1; \
	}

/* CHK_NEQ: return 1 when the two values differ, logging both */
#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " \
			       #c1 " != " #c2 " - %d != %d\n", \
			       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_legacy_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
				active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	/* nothing significant changed: RXON_ASSOC is sufficient */
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
558
559u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
560 struct iwl_rxon_context *ctx)
561{
562 /*
563 * Assign the lowest rate -- should really get this from
564 * the beacon skb from mac80211.
565 */
566 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
567 return IWL_RATE_1M_PLCP;
568 else
569 return IWL_RATE_6M_PLCP;
570}
571EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
572
/*
 * Program the HT-related staging RXON flags for one context from the
 * current HT state: protection mode, channel mode (legacy / mixed /
 * pure 40) and control-channel location.  Only ctx->staging is touched.
 */
static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
			struct iwl_ht_config *ht_conf,
			struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		/* HT off: strip every HT-related flag and bail out */
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
					RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection ==
				IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	/* let the HW-specific code update the RX chain configuration */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
644
/* Apply the current HT configuration to every RXON context's staging cmd. */
void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
653
/* Return valid, unused, channel for a passive scan to reset the RF */
u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
		enum ieee80211_band band)
{
	const struct iwl_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;
	struct iwl_rxon_context *ctx;

	/*
	 * min/max are indices into priv->channel_info[], not channel
	 * numbers.  NOTE(review): assumes entries 0..13 are the 2.4 GHz
	 * channels and the remainder 5 GHz -- confirm against the EEPROM
	 * channel-table construction.
	 */
	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = priv->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		bool busy = false;

		/* skip channels currently in use by any context */
		for_each_context(priv, ctx) {
			busy = priv->channel_info[i].channel ==
				le16_to_cpu(ctx->staging.channel);
			if (busy)
				break;
		}

		if (busy)
			continue;

		channel = priv->channel_info[i].channel;
		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (iwl_legacy_is_channel_valid(ch_info))
			break;
	}

	/* 0 when no valid unused channel was found */
	return channel;
}
EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
694
/**
 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
int
iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			 struct iwl_rxon_context *ctx)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	/* already tuned to this channel and band: nothing to do */
	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return 0;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
726
727void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
728 struct iwl_rxon_context *ctx,
729 enum ieee80211_band band,
730 struct ieee80211_vif *vif)
731{
732 if (band == IEEE80211_BAND_5GHZ) {
733 ctx->staging.flags &=
734 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
735 | RXON_FLG_CCK_MSK);
736 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
737 } else {
738 /* Copied from iwl_post_associate() */
739 if (vif && vif->bss_conf.use_short_slot)
740 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
741 else
742 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
743
744 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
745 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
746 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
747 }
748}
749EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
750
/*
 * Initialize a context's staging RXON structure with default values:
 * device type for the interface mode, channel from the EEPROM, band
 * flags and basic-rate masks.  Nothing is committed to the hardware.
 */
void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info = iwl_legacy_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	/* fall back to the first channel in the table if lookup fails */
	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
819
820void iwl_legacy_set_rate(struct iwl_priv *priv)
821{
822 const struct ieee80211_supported_band *hw = NULL;
823 struct ieee80211_rate *rate;
824 struct iwl_rxon_context *ctx;
825 int i;
826
827 hw = iwl_get_hw_mode(priv, priv->band);
828 if (!hw) {
829 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
830 return;
831 }
832
833 priv->active_rate = 0;
834
835 for (i = 0; i < hw->n_bitrates; i++) {
836 rate = &(hw->bitrates[i]);
837 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
838 priv->active_rate |= (1 << rate->hw_value);
839 }
840
841 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
842
843 for_each_context(priv, ctx) {
844 ctx->staging.cck_basic_rates =
845 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
846
847 ctx->staging.ofdm_basic_rates =
848 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
849 }
850}
851EXPORT_SYMBOL(iwl_legacy_set_rate);
852
/*
 * Complete a pending channel switch: notify mac80211 of the outcome and
 * clear the in-progress flag.  No-op if the driver is shutting down or
 * no switch is pending.
 */
void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->switch_rxon.switch_in_progress) {
		/* notify mac80211 first, then clear the flag under the mutex */
		ieee80211_chswitch_done(ctx->vif, is_success);
		mutex_lock(&priv->mutex);
		priv->switch_rxon.switch_in_progress = false;
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
868
/*
 * Handle a channel-switch-announcement notification from the uCode.
 * On success (and matching channel) update the active and staging RXON
 * channel; in both cases finish the pending switch via
 * iwl_legacy_chswitch_done().
 */
void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	/* cast away const-ness: the active RXON channel is updated in place */
	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, false);
		}
	}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
893
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Dump the staging RXON command, field by field, to the debug log. */
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
				le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
				le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
				rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
				le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
#endif
/**
 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
 *
 * Marks the firmware-error state, cancels the in-flight host command,
 * dumps diagnostic logs, wakes waiters and (if configured) schedules a
 * firmware restart.
 */
void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	/* dump error and event logs for post-mortem analysis */
	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_legacy_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	/* release anyone waiting for a command response */
	wake_up_interruptible(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			  "Restarting adapter due to uCode error.\n");

		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
958
959static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
960{
961 int ret = 0;
962
963 /* stop device's busmaster DMA activity */
964 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
965
966 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
967 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
968 if (ret)
969 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
970
971 IWL_DEBUG_INFO(priv, "stop master\n");
972
973 return ret;
974}
975
/*
 * Power the card down: stop bus-master DMA, issue a software reset and
 * drop the adapter from D0A back to the uninitialized D0U state.
 */
void iwl_legacy_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_legacy_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* give the reset time to take effect before dropping INIT_DONE */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_legacy_apm_stop);
996
997
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
int iwl_legacy_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
					CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is no-op for 3945 (non-existent bit)
	 */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (priv->cfg->base_params->set_l0s) {
		lctl = iwl_legacy_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S  */
			iwl_legacy_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_legacy_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
			    priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->base_params->use_bsm)
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_apm_init);
1110
1111
/*
 * Set the user TX power limit.
 *
 * Validates the requested power against device limits, defers the change
 * while scanning or while a RXON commit is pending (unless @force), and
 * restores the previous value if the hardware command fails.
 * Must be called with priv->mutex held.  Returns 0 or a -errno.
 */
int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!priv->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWL4965_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = priv->cfg->ops->lib->send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1170
1171void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1172{
1173 struct iwl_bt_cmd bt_cmd = {
1174 .lead_time = BT_LEAD_TIME_DEF,
1175 .max_kill = BT_MAX_KILL_DEF,
1176 .kill_ack_mask = 0,
1177 .kill_cts_mask = 0,
1178 };
1179
1180 if (!bt_coex_active)
1181 bt_cmd.flags = BT_COEX_DISABLE;
1182 else
1183 bt_cmd.flags = BT_COEX_ENABLE;
1184
1185 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1186 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1187
1188 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1189 sizeof(struct iwl_bt_cmd), &bt_cmd))
1190 IWL_ERR(priv, "failed to send BT Coex Config\n");
1191}
1192EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1193
1194int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1195{
1196 struct iwl_statistics_cmd statistics_cmd = {
1197 .configuration_flags =
1198 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1199 };
1200
1201 if (flags & CMD_ASYNC)
1202 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1203 sizeof(struct iwl_statistics_cmd),
1204 &statistics_cmd, NULL);
1205 else
1206 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1207 sizeof(struct iwl_statistics_cmd),
1208 &statistics_cmd);
1209}
1210EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1211
/* Log a power-management sleep notification from the uCode (debug only). */
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1223
1224void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1225 struct iwl_rx_mem_buffer *rxb)
1226{
1227 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1228 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1229 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1230 "notification for %s:\n", len,
1231 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1232 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1233}
1234EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1235
/* Log an error reply from the uCode (type, offending command, seq, info). */
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1250
/* Reset all interrupt-service-routine statistics counters to zero. */
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1255
/*
 * mac80211 conf_tx callback: store the EDCA parameters (cw_min, cw_max,
 * AIFS, TXOP) for one access category into every context's QoS table.
 * The parameters are only cached here; they are sent to the device by
 * the QoS activation path.
 */
int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
			   const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/*
	 * NOTE(review): mac80211 queue order appears to be the reverse of
	 * the firmware AC table order -- confirm against the QoS command
	 * definition.
	 */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	for_each_context(priv, ctx) {
		ctx->qos_data.def_qos_parm.ac[q].cw_min =
			cpu_to_le16(params->cw_min);
		ctx->qos_data.def_qos_parm.ac[q].cw_max =
			cpu_to_le16(params->cw_max);
		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
				cpu_to_le16((params->txop * 32));

		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1298
1299int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
1300{
1301 struct iwl_priv *priv = hw->priv;
1302
1303 return priv->ibss_manager == IWL_IBSS_MANAGER;
1304}
1305EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1306
/*
 * (Re)initialize a context's staging RXON for its current mode, update
 * the RX chain if the HW supports it, and commit the RXON to the device.
 */
static int
iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_legacy_connection_init_rx_config(priv, ctx);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	return iwl_legacy_commit_rxon(priv, ctx);
}
1317
/*
 * Bring up a newly attached interface on @ctx: record the interface
 * mode, activate the context and commit its RXON.  On failure the
 * context is deactivated again (unless it is always-active).
 * Must be called with priv->mutex held.
 */
static int iwl_legacy_setup_interface(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&priv->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	priv->iw_mode = vif->type;

	ctx->is_active = true;

	err = iwl_legacy_set_mode(priv, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	return 0;
}
1344
/*
 * mac80211 add_interface callback: find a free RXON context that can
 * host the requested interface type, bind the vif to it and set it up.
 *
 * Returns 0 on success, -EINVAL when the device is not ready or an
 * exclusive context is already busy, -EOPNOTSUPP when no suitable
 * context exists.
 */
int
iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   vif->type, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(vif->type)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	err = iwl_legacy_setup_interface(priv, ctx);
	if (!err)
		goto out;

	/* setup failed: unbind the vif from the context again */
	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1407
1408static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
1409 struct ieee80211_vif *vif,
1410 bool mode_change)
1411{
1412 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1413
1414 lockdep_assert_held(&priv->mutex);
1415
1416 if (priv->scan_vif == vif) {
1417 iwl_legacy_scan_cancel_timeout(priv, 200);
1418 iwl_legacy_force_scan_end(priv);
1419 }
1420
1421 if (!mode_change) {
1422 iwl_legacy_set_mode(priv, ctx);
1423 if (!ctx->always_active)
1424 ctx->is_active = false;
1425 }
1426}
1427
/*
 * iwl_legacy_mac_remove_interface - mac80211 remove_interface callback
 *
 * Unbinds the vif from its RXON context, tears the interface down and
 * forgets the cached BSSID.  All under priv->mutex.
 */
void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	/* the context must still point at the vif being removed */
	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	iwl_legacy_teardown_interface(priv, vif, false);

	/* forget the BSSID of the interface that just went away */
	memset(priv->bssid, 0, ETH_ALEN);
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");

}
EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1450
1451int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1452{
1453 if (!priv->txq)
1454 priv->txq = kzalloc(
1455 sizeof(struct iwl_tx_queue) *
1456 priv->cfg->base_params->num_of_queues,
1457 GFP_KERNEL);
1458 if (!priv->txq) {
1459 IWL_ERR(priv, "Not enough memory for txq\n");
1460 return -ENOMEM;
1461 }
1462 return 0;
1463}
1464EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1465
/*
 * iwl_legacy_txq_mem - free the TX queue array allocated by
 * iwl_legacy_alloc_txq_mem() and clear the pointer so a later
 * re-allocation (or double free) is safe.
 */
void iwl_legacy_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;
}
EXPORT_SYMBOL(iwl_legacy_txq_mem);
1472
1473#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1474
1475#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1476
/*
 * iwl_legacy_reset_traffic_log - rewind both debug capture rings to
 * index 0 and wipe their contents (only if the buffers exist).
 */
void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
{
	priv->tx_traffic_idx = 0;
	priv->rx_traffic_idx = 0;
	if (priv->tx_traffic)
		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
	if (priv->rx_traffic)
		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
}
1486
1487int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1488{
1489 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1490
1491 if (iwlegacy_debug_level & IWL_DL_TX) {
1492 if (!priv->tx_traffic) {
1493 priv->tx_traffic =
1494 kzalloc(traffic_size, GFP_KERNEL);
1495 if (!priv->tx_traffic)
1496 return -ENOMEM;
1497 }
1498 }
1499 if (iwlegacy_debug_level & IWL_DL_RX) {
1500 if (!priv->rx_traffic) {
1501 priv->rx_traffic =
1502 kzalloc(traffic_size, GFP_KERNEL);
1503 if (!priv->rx_traffic)
1504 return -ENOMEM;
1505 }
1506 }
1507 iwl_legacy_reset_traffic_log(priv);
1508 return 0;
1509}
1510EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1511
/*
 * iwl_legacy_free_traffic_mem - free both debug capture buffers and
 * clear the pointers so subsequent logging calls bail out safely.
 */
void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
{
	kfree(priv->tx_traffic);
	priv->tx_traffic = NULL;

	kfree(priv->rx_traffic);
	priv->rx_traffic = NULL;
}
EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1521
1522void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1523 u16 length, struct ieee80211_hdr *header)
1524{
1525 __le16 fc;
1526 u16 len;
1527
1528 if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
1529 return;
1530
1531 if (!priv->tx_traffic)
1532 return;
1533
1534 fc = header->frame_control;
1535 if (ieee80211_is_data(fc)) {
1536 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1537 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1538 memcpy((priv->tx_traffic +
1539 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1540 header, len);
1541 priv->tx_traffic_idx =
1542 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1543 }
1544}
1545EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1546
1547void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1548 u16 length, struct ieee80211_hdr *header)
1549{
1550 __le16 fc;
1551 u16 len;
1552
1553 if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
1554 return;
1555
1556 if (!priv->rx_traffic)
1557 return;
1558
1559 fc = header->frame_control;
1560 if (ieee80211_is_data(fc)) {
1561 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1562 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1563 memcpy((priv->rx_traffic +
1564 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1565 header, len);
1566 priv->rx_traffic_idx =
1567 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1568 }
1569}
1570EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1571
/*
 * iwl_legacy_get_mgmt_string - map a MANAGEMENT_* statistics index to
 * its name for debugfs output.  IWL_CMD expands each entry into a
 * "case X: return #X;" arm; unknown values yield "UNKNOWN".
 */
const char *iwl_legacy_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(MANAGEMENT_ASSOC_REQ);
		IWL_CMD(MANAGEMENT_ASSOC_RESP);
		IWL_CMD(MANAGEMENT_REASSOC_REQ);
		IWL_CMD(MANAGEMENT_REASSOC_RESP);
		IWL_CMD(MANAGEMENT_PROBE_REQ);
		IWL_CMD(MANAGEMENT_PROBE_RESP);
		IWL_CMD(MANAGEMENT_BEACON);
		IWL_CMD(MANAGEMENT_ATIM);
		IWL_CMD(MANAGEMENT_DISASSOC);
		IWL_CMD(MANAGEMENT_AUTH);
		IWL_CMD(MANAGEMENT_DEAUTH);
		IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1592
/*
 * iwl_legacy_get_ctrl_string - map a CONTROL_* statistics index to
 * its name for debugfs output; unknown values yield "UNKNOWN".
 */
const char *iwl_legacy_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(CONTROL_BACK_REQ);
		IWL_CMD(CONTROL_BACK);
		IWL_CMD(CONTROL_PSPOLL);
		IWL_CMD(CONTROL_RTS);
		IWL_CMD(CONTROL_CTS);
		IWL_CMD(CONTROL_ACK);
		IWL_CMD(CONTROL_CFEND);
		IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1609
1610void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1611{
1612 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1613 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1614}
1615
1616/*
1617 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined,
1618 * iwl_legacy_update_stats function will
1619 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
1620 * Use debugFs to display the rx/rx_statistics
1621 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL
1622 * information will be recorded, but DATA pkt still will be recorded
1623 * for the reason of iwl_led.c need to control the led blinking based on
1624 * number of tx and rx data.
1625 *
1626 */
/*
 * iwl_legacy_update_stats - account one frame in the tx or rx
 * traffic statistics
 * @is_tx: true to update priv->tx_stats, false for priv->rx_stats
 * @fc: the frame's 802.11 frame-control field (little endian)
 * @len: frame length in bytes; only accumulated for data frames
 *
 * Management and control frames bump the per-subtype counter that
 * matches the FCTL subtype; everything else is counted as data
 * (count and byte total).
 */
void
iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/* select the counter from the management subtype bits */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		/* select the counter from the control subtype bits */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(iwl_legacy_update_stats);
1710#endif
1711
/*
 * _iwl_legacy_force_rf_reset - reset the radio by forcing a channel
 * switch via an internal single-channel scan.  No-op while the driver
 * is shutting down, and rejected unless at least one context is
 * associated.
 */
static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!iwl_legacy_is_any_associated(priv)) {
		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
		return;
	}
	/*
	 * There is no easy and better way to force-reset the radio:
	 * the only known method is switching channel, which forces the
	 * radio to reset and retune.  An internal short scan (single
	 * channel) achieves exactly that.
	 * The driver should reset the radio on consecutive missed
	 * beacons or any other detected uCode error condition.
	 */
	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
	iwl_legacy_internal_short_hw_scan(priv);
}
1733
1734
/*
 * iwl_legacy_force_reset - perform an RF or firmware reset
 * @mode: IWL_RF_RESET or IWL_FW_RESET (must be < IWL_MAX_FORCE_RESET)
 * @external: true when requested from outside the driver (e.g.
 *	debugfs); external requests bypass both the rate limit and,
 *	for IWL_FW_RESET, the fw_restart module-parameter check
 *
 * Internal requests arriving within reset_duration of the previous
 * reset are rejected with -EAGAIN.  Returns 0 on success, -EINVAL
 * on shutdown or a bad mode.
 */
int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
{
	struct iwl_force_reset *force_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (mode >= IWL_MAX_FORCE_RESET) {
		IWL_DEBUG_INFO(priv, "invalid reset request.\n");
		return -EINVAL;
	}
	force_reset = &priv->force_reset[mode];
	force_reset->reset_request_count++;
	if (!external) {
		/* rate-limit internal requests: reject if the previous
		 * reset happened less than reset_duration jiffies ago */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
		    force_reset->reset_duration, jiffies)) {
			IWL_DEBUG_INFO(priv, "force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;
	IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
	switch (mode) {
	case IWL_RF_RESET:
		_iwl_legacy_force_rf_reset(priv);
		break;
	case IWL_FW_RESET:
		/*
		 * If the request is external (e.g. debugfs), always
		 * perform it regardless of the module parameter.
		 * If the request is internal (uCode error or driver-
		 * detected failure), the fw_restart module parameter
		 * must be checked before reloading the firmware.
		 */
		if (!external && !priv->cfg->mod_params->restart_fw) {
			IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
				       "module parameter setting\n");
			break;
		}
		IWL_ERR(priv, "On demand firmware reload\n");
		/* Set the FW error flag -- cleared on iwl_down */
		set_bit(STATUS_FW_ERROR, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
		/*
		 * Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit
		 */
		clear_bit(STATUS_READY, &priv->status);
		queue_work(priv->workqueue, &priv->restart);
		break;
	}
	return 0;
}
1792
/*
 * iwl_legacy_mac_change_interface - mac80211 change_interface callback
 *
 * Switches the vif's context to @newtype (adjusted for p2p) if the
 * context supports that mode and, when the new mode is exclusive, no
 * other context is active.  On success the interface is torn down and
 * set up again with the new type; a failure of the device-side setup
 * is deliberately masked (see comment below) so mac80211 stays in
 * sync.  Returns 0 or -EBUSY.
 */
int
iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	struct iwl_rxon_context *tmp;
	u32 interface_modes;
	int err;

	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;

	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->vif)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_legacy_teardown_interface(priv, vif, true);
	vif->type = newtype;
	err = iwl_legacy_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

 out:
	mutex_unlock(&priv->mutex);
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1851
1852/*
1853 * On every watchdog tick we check (latest) time stamp. If it does not
1854 * change during timeout period and queue is not empty we reset firmware.
1855 */
1856static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1857{
1858 struct iwl_tx_queue *txq = &priv->txq[cnt];
1859 struct iwl_queue *q = &txq->q;
1860 unsigned long timeout;
1861 int ret;
1862
1863 if (q->read_ptr == q->write_ptr) {
1864 txq->time_stamp = jiffies;
1865 return 0;
1866 }
1867
1868 timeout = txq->time_stamp +
1869 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1870
1871 if (time_after(jiffies, timeout)) {
1872 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1873 q->id, priv->cfg->base_params->wd_timeout);
1874 ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
1875 return (ret == -EAGAIN) ? 0 : 1;
1876 }
1877
1878 return 0;
1879}
1880
1881/*
1882 * Making watchdog tick be a quarter of timeout assure we will
1883 * discover the queue hung between timeout and 1.25*timeout
1884 */
1885#define IWL_WD_TICK(timeout) ((timeout) / 4)
1886
1887/*
1888 * Watchdog timer callback, we check each tx queue for stuck, if if hung
1889 * we reset the firmware. If everything is fine just rearm the timer.
1890 */
/*
 * iwl_legacy_bg_watchdog - periodic TX-queue watchdog
 * @data: the struct iwl_priv pointer cast to unsigned long (timer API)
 *
 * Checks the command queue and, when associated, every other TX queue
 * for a stuck condition; any detected hang triggers a firmware reset
 * and the timer is NOT rearmed (the restart path takes over).
 * Otherwise rearms itself at IWL_WD_TICK(wd_timeout).  Disabled when
 * wd_timeout is 0 or the driver is exiting.
 */
void iwl_legacy_bg_watchdog(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	timeout = priv->cfg->base_params->wd_timeout;
	if (timeout == 0)
		return;

	/* monitor and check for stuck cmd queue */
	if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	if (iwl_legacy_is_any_associated(priv)) {
		for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
			/* skip as we already checked the command queue */
			if (cnt == priv->cmd_queue)
				continue;
			if (iwl_legacy_check_stuck_queue(priv, cnt))
				return;
		}
	}

	mod_timer(&priv->watchdog, jiffies +
		  msecs_to_jiffies(IWL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1923
1924void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1925{
1926 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1927
1928 if (timeout)
1929 mod_timer(&priv->watchdog,
1930 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1931 else
1932 del_timer(&priv->watchdog);
1933}
1934EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1935
1936/*
1937 * extended beacon time format
1938 * time in usec will be changed into a 32-bit value in extended:internal format
1939 * the extended part is the beacon counts
1940 * the internal part is the time in usec within one beacon interval
1941 */
1942u32
1943iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
1944 u32 usec, u32 beacon_interval)
1945{
1946 u32 quot;
1947 u32 rem;
1948 u32 interval = beacon_interval * TIME_UNIT;
1949
1950 if (!interval || !usec)
1951 return 0;
1952
1953 quot = (usec / interval) &
1954 (iwl_legacy_beacon_time_mask_high(priv,
1955 priv->hw_params.beacon_time_tsf_bits) >>
1956 priv->hw_params.beacon_time_tsf_bits);
1957 rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
1958 priv->hw_params.beacon_time_tsf_bits);
1959
1960 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
1961}
1962EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
1963
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down.
 * Adds two extended-beacon-time values (see iwl_legacy_usecs_to_beacons),
 * carrying from the usec (low) part into the beacon-count (high) part
 * when the low parts wrap within the interval.
 */
__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	/* base counts down, so the comparison decides whether the low
	 * parts wrapped and a carry into the beacon count is needed */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1991
1992#ifdef CONFIG_PM
1993
/*
 * iwl_legacy_pci_suspend - dev_pm_ops suspend/freeze/poweroff hook
 *
 * Stops the device's APM (and thereby DMA) explicitly; see the
 * comment below for why mac80211's own stop path is not enough here.
 * Always returns 0.
 */
int iwl_legacy_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	/*
	 * This function is called when system goes into suspend state
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
	 * first but since iwl_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here to make sure we stop the DMA.
	 */
	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);
2011
2012int iwl_legacy_pci_resume(struct device *device)
2013{
2014 struct pci_dev *pdev = to_pci_dev(device);
2015 struct iwl_priv *priv = pci_get_drvdata(pdev);
2016 bool hw_rfkill = false;
2017
2018 /*
2019 * We disable the RETRY_TIMEOUT register (0x41) to keep
2020 * PCI Tx retries from interfering with C3 CPU state.
2021 */
2022 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2023
2024 iwl_legacy_enable_interrupts(priv);
2025
2026 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2027 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2028 hw_rfkill = true;
2029
2030 if (hw_rfkill)
2031 set_bit(STATUS_RF_KILL_HW, &priv->status);
2032 else
2033 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2034
2035 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2036
2037 return 0;
2038}
2039EXPORT_SYMBOL(iwl_legacy_pci_resume);
2040
/*
 * Power-management callbacks registered with the PCI driver: every
 * suspend-like transition maps to iwl_legacy_pci_suspend() and every
 * resume-like transition to iwl_legacy_pci_resume().
 */
const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);
2050
2051#endif /* CONFIG_PM */
2052
/*
 * iwl_legacy_update_qos - push the context's QoS parameters to the
 * uCode (asynchronously).  Skipped while exiting or when the context
 * is inactive.  Rebuilds qos_flags from qos_active and HT state
 * before sending.
 */
static void
iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
		      ctx->qos_data.qos_active,
		      ctx->qos_data.def_qos_parm.qos_flags);

	iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
			       sizeof(struct iwl_qosparam_cmd),
			       &ctx->qos_data.def_qos_parm, NULL);
}
2079
2080/**
2081 * iwl_legacy_mac_config - mac80211 config callback
2082 */
2083int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2084{
2085 struct iwl_priv *priv = hw->priv;
2086 const struct iwl_channel_info *ch_info;
2087 struct ieee80211_conf *conf = &hw->conf;
2088 struct ieee80211_channel *channel = conf->channel;
2089 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2090 struct iwl_rxon_context *ctx;
2091 unsigned long flags = 0;
2092 int ret = 0;
2093 u16 ch;
2094 int scan_active = 0;
2095 bool ht_changed[NUM_IWL_RXON_CTX] = {};
2096
2097 if (WARN_ON(!priv->cfg->ops->legacy))
2098 return -EOPNOTSUPP;
2099
2100 mutex_lock(&priv->mutex);
2101
2102 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2103 channel->hw_value, changed);
2104
2105 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2106 test_bit(STATUS_SCANNING, &priv->status))) {
2107 scan_active = 1;
2108 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2109 }
2110
2111 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2112 IEEE80211_CONF_CHANGE_CHANNEL)) {
2113 /* mac80211 uses static for non-HT which is what we want */
2114 priv->current_ht_config.smps = conf->smps_mode;
2115
2116 /*
2117 * Recalculate chain counts.
2118 *
2119 * If monitor mode is enabled then mac80211 will
2120 * set up the SM PS mode to OFF if an HT channel is
2121 * configured.
2122 */
2123 if (priv->cfg->ops->hcmd->set_rxon_chain)
2124 for_each_context(priv, ctx)
2125 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2126 }
2127
2128 /* during scanning mac80211 will delay channel setting until
2129 * scan finish with changed = 0
2130 */
2131 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2132 if (scan_active)
2133 goto set_ch_out;
2134
2135 ch = channel->hw_value;
2136 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
2137 if (!iwl_legacy_is_channel_valid(ch_info)) {
2138 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2139 ret = -EINVAL;
2140 goto set_ch_out;
2141 }
2142
2143 spin_lock_irqsave(&priv->lock, flags);
2144
2145 for_each_context(priv, ctx) {
2146 /* Configure HT40 channels */
2147 if (ctx->ht.enabled != conf_is_ht(conf)) {
2148 ctx->ht.enabled = conf_is_ht(conf);
2149 ht_changed[ctx->ctxid] = true;
2150 }
2151 if (ctx->ht.enabled) {
2152 if (conf_is_ht40_minus(conf)) {
2153 ctx->ht.extension_chan_offset =
2154 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2155 ctx->ht.is_40mhz = true;
2156 } else if (conf_is_ht40_plus(conf)) {
2157 ctx->ht.extension_chan_offset =
2158 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2159 ctx->ht.is_40mhz = true;
2160 } else {
2161 ctx->ht.extension_chan_offset =
2162 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2163 ctx->ht.is_40mhz = false;
2164 }
2165 } else
2166 ctx->ht.is_40mhz = false;
2167
2168 /*
2169 * Default to no protection. Protection mode will
2170 * later be set from BSS config in iwl_ht_conf
2171 */
2172 ctx->ht.protection =
2173 IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2174
2175 /* if we are switching from ht to 2.4 clear flags
2176 * from any ht related info since 2.4 does not
2177 * support ht */
2178 if ((le16_to_cpu(ctx->staging.channel) != ch))
2179 ctx->staging.flags = 0;
2180
2181 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2182 iwl_legacy_set_rxon_ht(priv, ht_conf);
2183
2184 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2185 ctx->vif);
2186 }
2187
2188 spin_unlock_irqrestore(&priv->lock, flags);
2189
2190 if (priv->cfg->ops->legacy->update_bcast_stations)
2191 ret =
2192 priv->cfg->ops->legacy->update_bcast_stations(priv);
2193
2194 set_ch_out:
2195 /* The list of supported rates and rate mask can be different
2196 * for each band; since the band may have changed, reset
2197 * the rate mask to what mac80211 lists */
2198 iwl_legacy_set_rate(priv);
2199 }
2200
2201 if (changed & (IEEE80211_CONF_CHANGE_PS |
2202 IEEE80211_CONF_CHANGE_IDLE)) {
2203 ret = iwl_legacy_power_update_mode(priv, false);
2204 if (ret)
2205 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2206 }
2207
2208 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2209 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2210 priv->tx_power_user_lmt, conf->power_level);
2211
2212 iwl_legacy_set_tx_power(priv, conf->power_level, false);
2213 }
2214
2215 if (!iwl_legacy_is_ready(priv)) {
2216 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2217 goto out;
2218 }
2219
2220 if (scan_active)
2221 goto out;
2222
2223 for_each_context(priv, ctx) {
2224 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2225 iwl_legacy_commit_rxon(priv, ctx);
2226 else
2227 IWL_DEBUG_INFO(priv,
2228 "Not re-sending same RXON configuration.\n");
2229 if (ht_changed[ctx->ctxid])
2230 iwl_legacy_update_qos(priv, ctx);
2231 }
2232
2233out:
2234 IWL_DEBUG_MAC80211(priv, "leave\n");
2235 mutex_unlock(&priv->mutex);
2236 return ret;
2237}
2238EXPORT_SYMBOL(iwl_legacy_mac_config);
2239
/*
 * iwl_legacy_mac_reset_tsf - mac80211 reset_tsf callback
 *
 * Restarts the association process on the BSS context: clears the
 * cached HT config, drops any IBSS beacon skb and timestamp, cancels
 * scanning, clears RXON_FILTER_ASSOC_MSK and commits the RXON, then
 * resets the rate mask.
 */
void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	/* IBSS can only be the IWL_RXON_CTX_BSS context */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	spin_lock_irqsave(&priv->lock, flags);
	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
	spin_unlock_irqrestore(&priv->lock, flags);

	spin_lock_irqsave(&priv->lock, flags);

	/* new association get rid of ibss beacon skb */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = NULL;

	priv->timestamp = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_legacy_scan_cancel_timeout(priv, 100);
	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	iwl_legacy_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2289
/*
 * iwl_legacy_ht_conf - refresh HT state from the vif's BSS config
 *
 * Copies protection mode and non-greenfield-STA presence from the HT
 * operation mode, then decides whether a single RX chain is
 * sufficient: for a station, by inspecting the AP's MCS rx_mask and
 * max TX streams; for ad-hoc, always.  No-op when HT is disabled on
 * the context.
 */
static void iwl_legacy_ht_conf(struct iwl_priv *priv,
			struct ieee80211_vif *vif)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_ASSOC(priv, "enter:\n");

	if (!ctx->ht.enabled)
		return;

	ctx->ht.protection =
		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ctx->ht.non_gf_sta_present =
		!!(bss_conf->ht_operation_mode &
				IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			/* peer's advertised maximum number of TX streams */
			maxstreams = (ht_cap->mcs.tx_params &
			      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
				>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			/* one chain suffices if the peer has no MCS
			 * rates beyond the first spatial stream */
			if ((ht_cap->mcs.rx_mask[1] == 0) &&
			    (ht_cap->mcs.rx_mask[2] == 0))
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	IWL_DEBUG_ASSOC(priv, "leave\n");
}
2349
/*
 * iwl_legacy_set_no_assoc - drop the association on the vif's context:
 * clear the ASSOC filter flag and assoc id, then commit the RXON.
 */
static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
				    struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	/*
	 * inform the ucode that there is no longer an
	 * association and that no more packets should be
	 * sent
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ctx->staging.assoc_id = 0;
	iwl_legacy_commit_rxon(priv, ctx);
}
2364
/*
 * iwl_legacy_beacon_update - fetch a fresh beacon from mac80211,
 * stash it (replacing any previous one) together with its timestamp,
 * and run the post_associate hook so the device picks it up.  Needs
 * priv->beacon_ctx to be set; requires priv->mutex held.
 */
static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* replace any previously stashed beacon */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	priv->timestamp = le64_to_cpu(timestamp);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return;
	}

	priv->cfg->ops->legacy->post_associate(priv);
}
2406
2407void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
2408 struct ieee80211_vif *vif,
2409 struct ieee80211_bss_conf *bss_conf,
2410 u32 changes)
2411{
2412 struct iwl_priv *priv = hw->priv;
2413 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2414 int ret;
2415
2416 if (WARN_ON(!priv->cfg->ops->legacy))
2417 return;
2418
2419 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2420
2421 if (!iwl_legacy_is_alive(priv))
2422 return;
2423
2424 mutex_lock(&priv->mutex);
2425
2426 if (changes & BSS_CHANGED_QOS) {
2427 unsigned long flags;
2428
2429 spin_lock_irqsave(&priv->lock, flags);
2430 ctx->qos_data.qos_active = bss_conf->qos;
2431 iwl_legacy_update_qos(priv, ctx);
2432 spin_unlock_irqrestore(&priv->lock, flags);
2433 }
2434
2435 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2436 /*
2437 * the add_interface code must make sure we only ever
2438 * have a single interface that could be beaconing at
2439 * any time.
2440 */
2441 if (vif->bss_conf.enable_beacon)
2442 priv->beacon_ctx = ctx;
2443 else
2444 priv->beacon_ctx = NULL;
2445 }
2446
2447 if (changes & BSS_CHANGED_BSSID) {
2448 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2449
2450 /*
2451 * If there is currently a HW scan going on in the
2452 * background then we need to cancel it else the RXON
2453 * below/in post_associate will fail.
2454 */
2455 if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
2456 IWL_WARN(priv,
2457 "Aborted scan still in progress after 100ms\n");
2458 IWL_DEBUG_MAC80211(priv,
2459 "leaving - scan abort failed.\n");
2460 mutex_unlock(&priv->mutex);
2461 return;
2462 }
2463
2464 /* mac80211 only sets assoc when in STATION mode */
2465 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
2466 memcpy(ctx->staging.bssid_addr,
2467 bss_conf->bssid, ETH_ALEN);
2468
2469 /* currently needed in a few places */
2470 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2471 } else {
2472 ctx->staging.filter_flags &=
2473 ~RXON_FILTER_ASSOC_MSK;
2474 }
2475
2476 }
2477
2478 /*
2479 * This needs to be after setting the BSSID in case
2480 * mac80211 decides to do both changes at once because
2481 * it will invoke post_associate.
2482 */
2483 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
2484 iwl_legacy_beacon_update(hw, vif);
2485
2486 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2487 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2488 bss_conf->use_short_preamble);
2489 if (bss_conf->use_short_preamble)
2490 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2491 else
2492 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2493 }
2494
2495 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2496 IWL_DEBUG_MAC80211(priv,
2497 "ERP_CTS %d\n", bss_conf->use_cts_prot);
2498 if (bss_conf->use_cts_prot &&
2499 (priv->band != IEEE80211_BAND_5GHZ))
2500 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
2501 else
2502 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2503 if (bss_conf->use_cts_prot)
2504 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
2505 else
2506 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
2507 }
2508
2509 if (changes & BSS_CHANGED_BASIC_RATES) {
2510 /* XXX use this information
2511 *
2512 * To do that, remove code from iwl_legacy_set_rate() and put something
2513 * like this here:
2514 *
2515 if (A-band)
2516 ctx->staging.ofdm_basic_rates =
2517 bss_conf->basic_rates;
2518 else
2519 ctx->staging.ofdm_basic_rates =
2520 bss_conf->basic_rates >> 4;
2521 ctx->staging.cck_basic_rates =
2522 bss_conf->basic_rates & 0xF;
2523 */
2524 }
2525
2526 if (changes & BSS_CHANGED_HT) {
2527 iwl_legacy_ht_conf(priv, vif);
2528
2529 if (priv->cfg->ops->hcmd->set_rxon_chain)
2530 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2531 }
2532
2533 if (changes & BSS_CHANGED_ASSOC) {
2534 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2535 if (bss_conf->assoc) {
2536 priv->timestamp = bss_conf->timestamp;
2537
2538 if (!iwl_legacy_is_rfkill(priv))
2539 priv->cfg->ops->legacy->post_associate(priv);
2540 } else
2541 iwl_legacy_set_no_assoc(priv, vif);
2542 }
2543
2544 if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
2545 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2546 changes);
2547 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
2548 if (!ret) {
2549 /* Sync active_rxon with latest change. */
2550 memcpy((void *)&ctx->active,
2551 &ctx->staging,
2552 sizeof(struct iwl_legacy_rxon_cmd));
2553 }
2554 }
2555
2556 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2557 if (vif->bss_conf.enable_beacon) {
2558 memcpy(ctx->staging.bssid_addr,
2559 bss_conf->bssid, ETH_ALEN);
2560 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2561 priv->cfg->ops->legacy->config_ap(priv);
2562 } else
2563 iwl_legacy_set_no_assoc(priv, vif);
2564 }
2565
2566 if (changes & BSS_CHANGED_IBSS) {
2567 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
2568 bss_conf->ibss_joined);
2569 if (ret)
2570 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
2571 bss_conf->ibss_joined ? "add" : "remove",
2572 bss_conf->bssid);
2573 }
2574
2575 mutex_unlock(&priv->mutex);
2576
2577 IWL_DEBUG_MAC80211(priv, "leave\n");
2578}
2579EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2580
/**
 * iwl_legacy_isr - hardware interrupt handler (legacy, non-ICT path)
 * @irq: IRQ number (unused here beyond the IRQ core's bookkeeping)
 * @data: pointer to the driver's struct iwl_priv, as passed to request_irq()
 *
 * Masks the NIC's interrupts (without clearing them), reads the pending
 * interrupt sources, and defers actual servicing to priv->irq_tasklet.
 * Interrupts are re-enabled either by the tasklet after servicing, or at
 * the "none" label below when there was nothing to service.
 *
 * Returns IRQ_HANDLED when the NIC raised (or appears to have raised) the
 * interrupt, IRQ_NONE when the line fired for another device sharing it.
 */
irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* All-ones (or the 0xa5a5a5ax fill pattern) from a register read
	 * means the device was surprise-removed; still report IRQ_HANDLED
	 * since the hardware may already have raised an interrupt. */
	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	/* NOTE(review): the scheduler bit is masked off before scheduling the
	 * tasklet -- presumably it is handled (or ignored) elsewhere; confirm
	 * against the irq_tasklet implementation. */
	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
2641
2642/*
2643 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
2644 * function.
2645 */
2646void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2647 struct ieee80211_tx_info *info,
2648 __le16 fc, __le32 *tx_flags)
2649{
2650 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2651 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2652 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2653 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2654
2655 if (!ieee80211_is_mgmt(fc))
2656 return;
2657
2658 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2659 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2660 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2661 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2662 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2663 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2664 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2665 break;
2666 }
2667 } else if (info->control.rates[0].flags &
2668 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2669 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2670 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2671 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2672 }
2673}
2674EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644
index 000000000000..f03b463e4378
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -0,0 +1,646 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
/* Host-command hooks that differ per hardware generation. */
struct iwl_hcmd_ops {
	/* send the RXON_ASSOC host command for the given context */
	int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
	/* commit the staging RXON configuration to the device */
	int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
	/* configure the RX chain selection for the given context */
	void (*set_rxon_chain)(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx);
};
96
/* Per-device helpers for sizing/building host commands and running scans. */
struct iwl_hcmd_utils_ops {
	/* size of host command @cmd_id given payload length @len */
	u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
	/* serialize an ADD_STA command into @data; returns bytes written */
	u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
							u8 *data);
	/* issue a scan request on behalf of @vif */
	int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
	/* cleanup/follow-up after scan completion */
	void (*post_scan)(struct iwl_priv *priv);
};
104
/* Always-on power management (APM) hardware init/config hooks. */
struct iwl_apm_ops {
	int (*init)(struct iwl_priv *priv);
	void (*config)(struct iwl_priv *priv);
};
109
/* Read callbacks backing the per-device debugfs statistics files. */
struct iwl_debugfs_ops {
	ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos);
};
118
/* Device-specific temperature measurement hook. */
struct iwl_temp_ops {
	void (*temperature)(struct iwl_priv *priv);
};
122
/*
 * Core per-hardware-generation library operations: TX queue handling,
 * uCode load, log dumping, channel switch, power management, EEPROM
 * access, temperature and PLCP-health checks, plus debugfs callbacks.
 */
struct iwl_lib_ops {
	/* set hw dependent parameters */
	int (*set_hw_params)(struct iwl_priv *priv);
	/* Handling TX */
	void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					u16 byte_cnt);
	int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
				     struct iwl_tx_queue *txq,
				     dma_addr_t addr,
				     u16 len, u8 reset, u8 pad);
	void (*txq_free_tfd)(struct iwl_priv *priv,
			     struct iwl_tx_queue *txq);
	int (*txq_init)(struct iwl_priv *priv,
			struct iwl_tx_queue *txq);
	/* setup Rx handler */
	void (*rx_handler_setup)(struct iwl_priv *priv);
	/* alive notification after init uCode load */
	void (*init_alive_start)(struct iwl_priv *priv);
	/* check validity of rtc data address */
	int (*is_valid_rtc_data_addr)(u32 addr);
	/* 1st ucode load */
	int (*load_ucode)(struct iwl_priv *priv);
	/* dump uCode event log, optionally into a buffer for display */
	int (*dump_nic_event_log)(struct iwl_priv *priv,
				  bool full_log, char **buf, bool display);
	void (*dump_nic_error_log)(struct iwl_priv *priv);
	/* dump flow-handler (FH) registers */
	int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
	int (*set_channel_switch)(struct iwl_priv *priv,
				  struct ieee80211_channel_switch *ch_switch);
	/* power management */
	struct iwl_apm_ops apm_ops;

	/* power */
	int (*send_tx_power) (struct iwl_priv *priv);
	void (*update_chain_flags)(struct iwl_priv *priv);

	/* eeprom operations (as defined in iwl-eeprom.h) */
	struct iwl_eeprom_ops eeprom_ops;

	/* temperature */
	struct iwl_temp_ops temp_ops;
	/* check for plcp health */
	bool (*check_plcp_health)(struct iwl_priv *priv,
				  struct iwl_rx_packet *pkt);

	struct iwl_debugfs_ops debugfs_ops;

};
171
/* Device-specific hook for sending the LED host command. */
struct iwl_led_ops {
	int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
};
175
/* mac80211-level hooks implemented differently per device generation. */
struct iwl_legacy_ops {
	/* finish association (called after RXON/BSSID setup) */
	void (*post_associate)(struct iwl_priv *priv);
	/* configure the device for AP/beaconing operation */
	void (*config_ap)(struct iwl_priv *priv);
	/* station management */
	int (*update_bcast_stations)(struct iwl_priv *priv);
	/* @add: true to add, false to remove the IBSS station */
	int (*manage_ibss_station)(struct iwl_priv *priv,
				   struct ieee80211_vif *vif, bool add);
};
184
/* Bundle of all per-device operation tables, referenced from iwl_cfg. */
struct iwl_ops {
	const struct iwl_lib_ops *lib;
	const struct iwl_hcmd_ops *hcmd;
	const struct iwl_hcmd_utils_ops *utils;
	const struct iwl_led_ops *led;
	const struct iwl_nic_ops *nic;
	const struct iwl_legacy_ops *legacy;
	const struct ieee80211_ops *ieee80211_ops;
};
194
/* Driver-wide settings configurable as module parameters at modprobe time. */
struct iwl_mod_params {
	int sw_crypto;		/* def: 0 = using hardware encryption */
	int disable_hw_scan;	/* def: 0 = use h/w scan */
	int num_of_queues;	/* def: HW dependent */
	int disable_11n;	/* def: 0 = 11n capabilities enabled */
	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
	int antenna;		/* def: 0 = both antennas (use diversity) */
	int restart_fw;		/* def: 1 = restart firmware */
};
204
/**
 * struct iwl_base_params - device-family parameters (not expected to change)
 * @eeprom_size: size of the device EEPROM image
 * @num_of_queues: number of TX queues (def: HW dependent)
 * @num_of_ampdu_queues: number of aggregation queues (def: HW dependent)
 * @pll_cfg_val: PLL configuration value, used by iwl_legacy_apm_init()
 * @set_l0s: whether to manage PCIe L0s state, used by iwl_legacy_apm_init()
 * @use_bsm: load uCode via the BSM (bootstrap state machine)
 * @led_compensation: compensate on the led on/off time per HW according
 *	to the deviation to achieve the desired led frequency.
 *	The detail algorithm is described in iwl-led.c
 * @chain_noise_num_beacons: number of beacons used to compute chain noise
 * @plcp_delta_threshold: plcp error rate threshold used to trigger
 *	radio tuning when there is a high receiving plcp error rate
 * @wd_timeout: TX queues watchdog timeout
 * @temperature_kelvin: temperature report by uCode in kelvin
 * @max_event_log_size: size of event log buffer size for ucode event logging
 * @ucode_tracing: support ucode continuous tracing
 * @sensitivity_calib_by_driver: driver has the capability to perform
 *	sensitivity calibration operation
 * @chain_noise_calib_by_driver: driver has the capability to perform
 *	chain noise calibration operation
 */
struct iwl_base_params {
	int eeprom_size;
	int num_of_queues;	/* def: HW dependent */
	int num_of_ampdu_queues;/* def: HW dependent */
	/* for iwl_legacy_apm_init() */
	u32 pll_cfg_val;
	bool set_l0s;
	bool use_bsm;

	u16 led_compensation;
	int chain_noise_num_beacons;
	u8 plcp_delta_threshold;
	unsigned int wd_timeout;
	bool temperature_kelvin;
	u32 max_event_log_size;
	const bool ucode_tracing;
	const bool sensitivity_calib_by_driver;
	const bool chain_noise_calib_by_driver;
};
240
241/**
242 * struct iwl_cfg
243 * @fw_name_pre: Firmware filename prefix. The api version and extension
244 * (.ucode) will be added to filename before loading from disk. The
245 * filename is constructed as fw_name_pre<api>.ucode.
246 * @ucode_api_max: Highest version of uCode API supported by driver.
247 * @ucode_api_min: Lowest version of uCode API supported by driver.
248 * @scan_antennas: available antenna for scan operation
249 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
250 *
251 * We enable the driver to be backward compatible wrt API version. The
252 * driver specifies which APIs it supports (with @ucode_api_max being the
253 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
254 * it has a supported API version. The firmware's API version will be
255 * stored in @iwl_priv, enabling the driver to make runtime changes based
256 * on firmware version used.
257 *
258 * For example,
259 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
260 * Driver interacts with Firmware API version >= 2.
261 * } else {
262 * Driver interacts with Firmware API version 1.
263 * }
264 *
265 * The ideal usage of this infrastructure is to treat a new ucode API
266 * release as a new hardware revision. That is, through utilizing the
267 * iwl_hcmd_utils_ops etc. we accommodate different command structures
268 * and flows between hardware versions as well as their API
269 * versions.
270 *
271 */
struct iwl_cfg {
	/* params specific to an individual device within a device family */
	const char *name;
	const char *fw_name_pre;	/* see kernel-doc above */
	const unsigned int ucode_api_max;	/* see kernel-doc above */
	const unsigned int ucode_api_min;	/* see kernel-doc above */
	/* NOTE(review): presumably per-antenna bitmasks -- confirm */
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	unsigned int sku;	/* NOTE(review): presumably IWL_SKU_* bits */
	u16 eeprom_ver;
	u16 eeprom_calib_ver;
	/* per-device operation tables (lib/hcmd/utils/led/legacy/...) */
	const struct iwl_ops *ops;
	/* module based parameters which can be set from modprobe cmd */
	const struct iwl_mod_params *mod_params;
	/* params not likely to change within a device family */
	struct iwl_base_params *base_params;
	/* params likely to change within a device family */
	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
	u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
	enum iwl_led_mode led_mode;	/* 0=blinking, 1=On(RF On)/Off(RF Off) */
};
293
294/***************************
295 * L i b *
296 ***************************/
297
298struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
299int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
300 const struct ieee80211_tx_queue_params *params);
301int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
302void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 int hw_decrypt);
305int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
306 struct iwl_rxon_context *ctx);
307int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
308 struct iwl_rxon_context *ctx);
309int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
310 struct ieee80211_channel *ch,
311 struct iwl_rxon_context *ctx);
312void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
313 struct iwl_rxon_context *ctx,
314 enum ieee80211_band band,
315 struct ieee80211_vif *vif);
316u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
317 enum ieee80211_band band);
318void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
319 struct iwl_ht_config *ht_conf);
320bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
321 struct iwl_rxon_context *ctx,
322 struct ieee80211_sta_ht_cap *ht_cap);
323void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
324 struct iwl_rxon_context *ctx);
325void iwl_legacy_set_rate(struct iwl_priv *priv);
326int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
327 struct ieee80211_hdr *hdr,
328 u32 decrypt_res,
329 struct ieee80211_rx_status *stats);
330void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
331int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
332 struct ieee80211_vif *vif);
333void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
334 struct ieee80211_vif *vif);
335int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
336 struct ieee80211_vif *vif,
337 enum nl80211_iftype newtype, bool newp2p);
338int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
339void iwl_legacy_txq_mem(struct iwl_priv *priv);
340
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/* Traffic/statistics logging helpers backing the debugfs interface. */
int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
				u16 length, struct ieee80211_hdr *header);
void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
				u16 length, struct ieee80211_hdr *header);
const char *iwl_legacy_get_mgmt_string(int cmd);
const char *iwl_legacy_get_ctrl_string(int cmd);
void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
		      u16 len);
#else
/*
 * Debugfs disabled: stub the logging helpers out as no-ops.
 * Note the *_get_*_string() and clear_traffic_stats() helpers have no
 * stubs here -- presumably they are only referenced from debugfs-only
 * code; verify before calling them from common paths.
 */
static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
{
	return 0;
}
static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
{
}
static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
{
}
static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
}
static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
}
static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
				    __le16 fc, u16 len)
{
}
#endif
378/*****************************************************
379 * RX handlers.
380 * **************************************************/
381void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
382 struct iwl_rx_mem_buffer *rxb);
383void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
384 struct iwl_rx_mem_buffer *rxb);
385void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
386 struct iwl_rx_mem_buffer *rxb);
387
388/*****************************************************
389* RX
390******************************************************/
391void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
392void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
393int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
394void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
395 struct iwl_rx_queue *q);
396int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
397void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
398 struct iwl_rx_mem_buffer *rxb);
399/* Handlers */
400void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
401 struct iwl_rx_mem_buffer *rxb);
402void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
403 struct iwl_rx_packet *pkt);
404void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
405void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
406
407/* TX helpers */
408
409/*****************************************************
410* TX
411******************************************************/
412void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
413 struct iwl_tx_queue *txq);
414int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
415 int slots_num, u32 txq_id);
416void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
417 struct iwl_tx_queue *txq,
418 int slots_num, u32 txq_id);
419void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
420void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
421void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
422/*****************************************************
423 * TX power
424 ****************************************************/
425int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
426
427/*******************************************************************************
428 * Rate
429 ******************************************************************************/
430
431u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
432 struct iwl_rxon_context *ctx);
433
434/*******************************************************************************
435 * Scanning
436 ******************************************************************************/
437void iwl_legacy_init_scan_params(struct iwl_priv *priv);
438int iwl_legacy_scan_cancel(struct iwl_priv *priv);
439int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
440void iwl_legacy_force_scan_end(struct iwl_priv *priv);
441int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
442 struct ieee80211_vif *vif,
443 struct cfg80211_scan_request *req);
444void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
445int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
446u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
447 struct ieee80211_mgmt *frame,
448 const u8 *ta, const u8 *ie, int ie_len, int left);
449void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
450u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
451 enum ieee80211_band band,
452 u8 n_probes);
453u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
454 enum ieee80211_band band,
455 struct ieee80211_vif *vif);
456void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
457void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
458
459/* For faster active scanning, scan will move to the next channel if fewer than
460 * PLCP_QUIET_THRESH packets are heard on this channel within
461 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
462 * time if it's a quiet channel (nothing responded to our probe, and there's
463 * no other traffic).
464 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
465#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
466#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
467
468#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
469
470/*****************************************************
471 * S e n d i n g H o s t C o m m a n d s *
472 *****************************************************/
473
474const char *iwl_legacy_get_cmd_string(u8 cmd);
475int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
476 struct iwl_host_cmd *cmd);
477int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
478int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
479 u16 len, const void *data);
480int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
481 const void *data,
482 void (*callback)(struct iwl_priv *priv,
483 struct iwl_device_cmd *cmd,
484 struct iwl_rx_packet *pkt));
485
486int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
487
488
489/*****************************************************
490 * PCI *
491 *****************************************************/
492
493static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
494{
495 int pos;
496 u16 pci_lnk_ctl;
497 pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
498 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
499 return pci_lnk_ctl;
500}
501
502void iwl_legacy_bg_watchdog(unsigned long data);
503u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
504 u32 usec, u32 beacon_interval);
505__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
506 u32 addon, u32 beacon_interval);
507
508#ifdef CONFIG_PM
509int iwl_legacy_pci_suspend(struct device *device);
510int iwl_legacy_pci_resume(struct device *device);
511extern const struct dev_pm_ops iwl_legacy_pm_ops;
512
513#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
514
515#else /* !CONFIG_PM */
516
517#define IWL_LEGACY_PM_OPS NULL
518
519#endif /* !CONFIG_PM */
520
521/*****************************************************
522* Error Handling Debugging
523******************************************************/
524void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
525int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
526 bool full_log, char **buf, bool display);
527#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
528void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
529 struct iwl_rxon_context *ctx);
530#else
531static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
532 struct iwl_rxon_context *ctx)
533{
534}
535#endif
536
537void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
538
539/*****************************************************
540* GEOS
541******************************************************/
542int iwl_legacy_init_geos(struct iwl_priv *priv);
543void iwl_legacy_free_geos(struct iwl_priv *priv);
544
545/*************** DRIVER STATUS FUNCTIONS *****/
546
547#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
548/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
549#define STATUS_INT_ENABLED 2
550#define STATUS_RF_KILL_HW 3
551#define STATUS_CT_KILL 4
552#define STATUS_INIT 5
553#define STATUS_ALIVE 6
554#define STATUS_READY 7
555#define STATUS_TEMPERATURE 8
556#define STATUS_GEO_CONFIGURED 9
557#define STATUS_EXIT_PENDING 10
558#define STATUS_STATISTICS 12
559#define STATUS_SCANNING 13
560#define STATUS_SCAN_ABORTING 14
561#define STATUS_SCAN_HW 15
562#define STATUS_POWER_PMI 16
563#define STATUS_FW_ERROR 17
564
565
566static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
567{
568 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
569 * set but EXIT_PENDING is not */
570 return test_bit(STATUS_READY, &priv->status) &&
571 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
572 !test_bit(STATUS_EXIT_PENDING, &priv->status);
573}
574
575static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
576{
577 return test_bit(STATUS_ALIVE, &priv->status);
578}
579
580static inline int iwl_legacy_is_init(struct iwl_priv *priv)
581{
582 return test_bit(STATUS_INIT, &priv->status);
583}
584
585static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
586{
587 return test_bit(STATUS_RF_KILL_HW, &priv->status);
588}
589
590static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
591{
592 return iwl_legacy_is_rfkill_hw(priv);
593}
594
595static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
596{
597 return test_bit(STATUS_CT_KILL, &priv->status);
598}
599
static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
{
	/* ready for RF use: not rf-killed AND generally ready */
	return iwl_legacy_is_rfkill(priv) ? 0 : iwl_legacy_is_ready(priv);
}
608
609extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
610extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
611 u8 flags, bool clear);
612void iwl_legacy_apm_stop(struct iwl_priv *priv);
613int iwl_legacy_apm_init(struct iwl_priv *priv);
614
615int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
616 struct iwl_rxon_context *ctx);
617static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
618 struct iwl_rxon_context *ctx)
619{
620 return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
621}
622static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
623 struct iwl_rxon_context *ctx)
624{
625 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
626}
627static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
628 struct iwl_priv *priv, enum ieee80211_band band)
629{
630 return priv->hw->wiphy->bands[band];
631}
632
633/* mac80211 handlers */
634int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
635void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
636void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
637 struct ieee80211_vif *vif,
638 struct ieee80211_bss_conf *bss_conf,
639 u32 changes);
640void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
641 struct ieee80211_tx_info *info,
642 __le16 fc, __le32 *tx_flags);
643
644irqreturn_t iwl_legacy_isr(int irq, void *data);
645
646#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644
index 000000000000..668a9616c269
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-csr.h
@@ -0,0 +1,422 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__
64#define __iwl_legacy_csr_h__
65/*
66 * CSR (control and status registers)
67 *
68 * CSR registers are mapped directly into PCI bus space, and are accessible
69 * whenever platform supplies power to device, even when device is in
70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers.
79 *
80 * NOTE: Device does need to be awake in order to read this memory
81 * via CSR_EEPROM register
82 */
83#define CSR_BASE (0x000)
84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96
97/*
98 * Hardware revision info
99 * Bit fields:
100 * 31-8: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */
108#define CSR_HW_REV (CSR_BASE+0x028)
109
110/*
111 * EEPROM memory reads
112 *
113 * NOTE: Device must be awake, initialized via apm_ops.init(),
114 * in order to read.
115 */
116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
117#define CSR_EEPROM_GP (CSR_BASE+0x030)
118
119#define CSR_GIO_REG (CSR_BASE+0x03C)
120#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
121#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
122
123/*
124 * UCODE-DRIVER GP (general purpose) mailbox registers.
125 * SET/CLR registers set/clear bit(s) if "1" is written.
126 */
127#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
128#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
129#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
130#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
131
132#define CSR_LED_REG (CSR_BASE+0x094)
133#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
134
135/* GIO Chicken Bits (PCI Express bus link power management) */
136#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
137
138/* Analog phase-lock-loop configuration */
139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
140
141/*
142 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
143 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
144 * See also CSR_HW_REV register.
145 * Bit fields:
146 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
147 * 1-0: "Dash" (-) value, as in C-1, etc.
148 */
149#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
150
151#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
153
154/* Bits for CSR_HW_IF_CONFIG_REG */
155#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
156#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
157#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
158#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
159
160#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
161#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
162#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
163#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
164#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
165#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
175
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
 * acknowledged (reset) by host writing "1" to flagged bits.
 * Bit 31 uses 1U: left-shifting signed 1 into the sign bit is
 * undefined behaviour in C. */
#define CSR_INT_BIT_FH_RX	(1U << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
#define CSR_INT_BIT_HW_ERR	(1 << 29) /* DMA hardware error FH_INT[31] */
#define CSR_INT_BIT_RX_PERIODIC	(1 << 28) /* Rx periodic */
#define CSR_INT_BIT_FH_TX	(1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD		(1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR	(1 << 25) /* uCode error */
#define CSR_INT_BIT_RF_KILL	(1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL	(1 << 6)  /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX	(1 << 3)  /* Rx, command responses, 3945 */
#define CSR_INT_BIT_WAKEUP	(1 << 1)  /* NIC controller waking up (pwr mgmt) */
#define CSR_INT_BIT_ALIVE	(1 << 0)  /* uCode interrupts once it initializes */

/* Default set of interrupts the driver enables/handles */
#define CSR_INI_SET_MASK	(CSR_INT_BIT_FH_RX   | \
				 CSR_INT_BIT_HW_ERR  | \
				 CSR_INT_BIT_FH_TX   | \
				 CSR_INT_BIT_SW_ERR  | \
				 CSR_INT_BIT_RF_KILL | \
				 CSR_INT_BIT_SW_RX   | \
				 CSR_INT_BIT_WAKEUP  | \
				 CSR_INT_BIT_ALIVE)
198
/* interrupt flags in FH (flow handler) (PCI busmaster DMA).
 * Bit 31 uses 1U: left-shifting signed 1 into the sign bit is
 * undefined behaviour in C. */
#define CSR_FH_INT_BIT_ERR	(1U << 31) /* Error */
#define CSR_FH_INT_BIT_HI_PRIOR	(1 << 30) /* High priority Rx, bypass coalescing */
#define CSR39_FH_INT_BIT_RX_CHNL2	(1 << 18) /* Rx channel 2 (3945 only) */
#define CSR_FH_INT_BIT_RX_CHNL1	(1 << 17) /* Rx channel 1 */
#define CSR_FH_INT_BIT_RX_CHNL0	(1 << 16) /* Rx channel 0 */
#define CSR39_FH_INT_BIT_TX_CHNL6	(1 << 6) /* Tx channel 6 (3945 only) */
#define CSR_FH_INT_BIT_TX_CHNL1	(1 << 1) /* Tx channel 1 */
#define CSR_FH_INT_BIT_TX_CHNL0	(1 << 0) /* Tx channel 0 */

/* Per-family (3945 vs 4965) aggregate Rx/Tx FH interrupt masks */
#define CSR39_FH_INT_RX_MASK	(CSR_FH_INT_BIT_HI_PRIOR | \
				 CSR39_FH_INT_BIT_RX_CHNL2 | \
				 CSR_FH_INT_BIT_RX_CHNL1 | \
				 CSR_FH_INT_BIT_RX_CHNL0)

#define CSR39_FH_INT_TX_MASK	(CSR39_FH_INT_BIT_TX_CHNL6 | \
				 CSR_FH_INT_BIT_TX_CHNL1 | \
				 CSR_FH_INT_BIT_TX_CHNL0)

#define CSR49_FH_INT_RX_MASK	(CSR_FH_INT_BIT_HI_PRIOR | \
				 CSR_FH_INT_BIT_RX_CHNL1 | \
				 CSR_FH_INT_BIT_RX_CHNL0)

#define CSR49_FH_INT_TX_MASK	(CSR_FH_INT_BIT_TX_CHNL1 | \
				 CSR_FH_INT_BIT_TX_CHNL0)
225
226/* GPIO */
227#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
228#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
229#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
230
231/* RESET */
232#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
233#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
234#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
235#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
236#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
237#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
238
239/*
240 * GP (general purpose) CONTROL REGISTER
241 * Bit fields:
242 * 27: HW_RF_KILL_SW
243 * Indicates state of (platform's) hardware RF-Kill switch
244 * 26-24: POWER_SAVE_TYPE
245 * Indicates current power-saving mode:
246 * 000 -- No power saving
247 * 001 -- MAC power-down
248 * 010 -- PHY (radio) power-down
249 * 011 -- Error
250 * 9-6: SYS_CONFIG
251 * Indicates current system configuration, reflecting pins on chip
252 * as forced high/low by device circuit board.
253 * 4: GOING_TO_SLEEP
254 * Indicates MAC is entering a power-saving sleep power-down.
255 * Not a good time to access device-internal resources.
256 * 3: MAC_ACCESS_REQ
257 * Host sets this to request and maintain MAC wakeup, to allow host
258 * access to device-internal resources. Host must wait for
259 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
260 * device registers.
261 * 2: INIT_DONE
262 * Host sets this to put device into fully operational D0 power mode.
263 * Host resets this after SW_RESET to put device into low power mode.
264 * 0: MAC_CLOCK_READY
265 * Indicates MAC (ucode processor, etc.) is powered up and can run.
266 * Internal resources are accessible.
267 * NOTE: This does not indicate that the processor is actually running.
268 * NOTE: This does not indicate that 4965 or 3945 has completed
269 * init or post-power-down restore of internal SRAM memory.
270 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
271 * SRAM is restored and uCode is in normal operation mode.
272 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
273 * do not need to save/restore it.
274 * NOTE: After device reset, this bit remains "0" until host sets
275 * INIT_DONE
276 */
277#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
278#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
279#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
280#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
281
282#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
283
284#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
287
288
289/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
292#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294
295/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300
301/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307
308
309/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311
312/*
313 * UCODE-DRIVER GP (general purpose) mailbox register 1
314 * Host driver and uCode write and/or read this register to communicate with
315 * each other.
316 * Bit fields:
317 * 4: UCODE_DISABLE
318 * Host sets this to request permanent halt of uCode, same as
319 * sending CARD_STATE command with "halt" bit set.
320 * 3: CT_KILL_EXIT
321 * Host sets this to request exit from CT_KILL state, i.e. host thinks
322 * device temperature is low enough to continue normal operation.
323 * 2: CMD_BLOCKED
324 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
325 * to release uCode to clear all Tx and command queues, enter
326 * unassociated mode, and power down.
327 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
328 * 1: SW_BIT_RFKILL
329 * Host sets this when issuing CARD_STATE command to request
330 * device sleep.
331 * 0: MAC_SLEEP
332 * uCode sets this when preparing a power-saving power-down.
333 * uCode resets this when power-up is complete and SRAM is sane.
334 * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
335 * and must restore this data after powering back up.
336 * MAC_SLEEP is the best indication that restore is complete.
337 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
338 * do not need to save/restore it.
339 */
340#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
341#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
342#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
343#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
344
345/* GIO Chicken Bits (PCI Express bus link power management) */
346#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
347#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
348
349/* LED */
350#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
351#define CSR_LED_REG_TRUN_ON (0x78)
352#define CSR_LED_REG_TRUN_OFF (0x38)
353
354/* ANA_PLL */
355#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
356
357/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359
/* DRAM INT TABLE.  ENABLE uses 1U: left-shifting signed 1 into bit 31
 * is undefined behaviour in C.  Note the "INIT" in WRAP_CHECK is a
 * historical name; it belongs to the same DRAM interrupt table register. */
#define CSR_DRAM_INT_TBL_ENABLE		(1U << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
363
364/*
365 * HBUS (Host-side Bus)
366 *
367 * HBUS registers are mapped directly into PCI bus space, but are used
368 * to indirectly access device's internal memory or registers that
369 * may be powered-down.
370 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
372 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources.
376 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC.
379 */
380#define HBUS_BASE (0x400)
381
382/*
383 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
384 * structures, error log, event log, verifying uCode load).
385 * First write to address register, then read from or write to data register
386 * to complete the job. Once the address register is set up, accesses to
387 * data registers auto-increment the address by one dword.
388 * Bit usage for address registers (read or write):
389 * 0-31: memory address within device
390 */
391#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
392#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
393#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
394#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
395
396/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
397#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
398#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
399
400/*
401 * Registers for accessing device's internal peripheral registers
402 * (e.g. SCD, BSM, etc.). First write to address register,
403 * then read from or write to data register to complete the job.
404 * Bit usage for address registers (read or write):
405 * 0-15: register address (offset) within device
406 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
407 */
408#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
409#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
410#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
412
413/*
414 * Per-Tx-queue write pointer (index, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled).
416 * Bit usage:
417 * 0-7: queue write index
418 * 11-8: queue selector
419 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
421
422#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644
index 000000000000..ae13112701bf
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debug.h
@@ -0,0 +1,198 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwlegacy_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
/* No-op stub used when CONFIG_IWLWIFI_LEGACY_DEBUG is disabled; lets
 * callers invoke the hex dump unconditionally without #ifdefs. */
static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
				      const void *p, u32 len)
{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
/* Stub for builds without CONFIG_IWLWIFI_LEGACY_DEBUGFS: report
 * success so callers need no conditional compilation. */
static inline int
iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
{
	return 0;
}
/* No-op stub paired with the register stub above when debugfs
 * support is compiled out. */
static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
{
}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
102 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
108 * /sys/module/iwl4965/parameters/debug{50}
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H		(1 << 28)
#define IWL_DL_STATS		(1 << 29)
#define IWL_DL_TX_REPLY		(1 << 30)
/* 1U: left-shifting signed 1 into bit 31 is undefined behaviour in C */
#define IWL_DL_QOS		(1U << 31)
155
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644
index 000000000000..2d32438b4cb8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -0,0 +1,1467 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31
32#include "iwl-dev.h"
33#include "iwl-debug.h"
34#include "iwl-core.h"
35#include "iwl-io.h"
36
37/* create and remove of files */
38#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
39 if (!debugfs_create_file(#name, mode, parent, priv, \
40 &iwl_legacy_dbgfs_##name##_ops)) \
41 goto err; \
42} while (0)
43
44#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
45 struct dentry *__tmp; \
46 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
47 parent, ptr); \
48 if (IS_ERR(__tmp) || !__tmp) \
49 goto err; \
50} while (0)
51
52#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
53 struct dentry *__tmp; \
54 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
55 parent, ptr); \
56 if (IS_ERR(__tmp) || !__tmp) \
57 goto err; \
58} while (0)
59
60/* file operation */
61#define DEBUGFS_READ_FUNC(name) \
62static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
63 char __user *user_buf, \
64 size_t count, loff_t *ppos);
65
66#define DEBUGFS_WRITE_FUNC(name) \
67static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
68 const char __user *user_buf, \
69 size_t count, loff_t *ppos);
70
71
72static int
73iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
74{
75 file->private_data = inode->i_private;
76 return 0;
77}
78
79#define DEBUGFS_READ_FILE_OPS(name) \
80 DEBUGFS_READ_FUNC(name); \
81static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
82 .read = iwl_legacy_dbgfs_##name##_read, \
83 .open = iwl_legacy_dbgfs_open_file_generic, \
84 .llseek = generic_file_llseek, \
85};
86
87#define DEBUGFS_WRITE_FILE_OPS(name) \
88 DEBUGFS_WRITE_FUNC(name); \
89static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
90 .write = iwl_legacy_dbgfs_##name##_write, \
91 .open = iwl_legacy_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93};
94
95#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
96 DEBUGFS_READ_FUNC(name); \
97 DEBUGFS_WRITE_FUNC(name); \
98static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
99 .write = iwl_legacy_dbgfs_##name##_write, \
100 .read = iwl_legacy_dbgfs_##name##_read, \
101 .open = iwl_legacy_dbgfs_open_file_generic, \
102 .llseek = generic_file_llseek, \
103};
104
/*
 * debugfs read handler: dump the driver-side Tx traffic counters.
 * Output lists per-subtype management and control frame counts,
 * followed by the aggregate data frame count and byte total.
 */
static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char *buf;
	int pos = 0;

	int cnt;
	ssize_t ret;
	/* ~50 bytes per counter line plus slack for headers/totals */
	const size_t bufsz = 100 +
		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_mgmt_string(cnt),
				 priv->tx_stats.mgmt[cnt]);
	}
	/* NOTE(review): lacks ':' unlike the Rx handler's "Control:" —
	 * confirm whether the asymmetric output is intentional */
	pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_ctrl_string(cnt),
				 priv->tx_stats.ctrl[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
			 priv->tx_stats.data_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
			 priv->tx_stats.data_bytes);
	/* copies at most 'count' bytes to userspace and advances *ppos */
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
143
144static ssize_t
145iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
146 const char __user *user_buf,
147 size_t count, loff_t *ppos)
148{
149 struct iwl_priv *priv = file->private_data;
150 u32 clear_flag;
151 char buf[8];
152 int buf_size;
153
154 memset(buf, 0, sizeof(buf));
155 buf_size = min(count, sizeof(buf) - 1);
156 if (copy_from_user(buf, user_buf, buf_size))
157 return -EFAULT;
158 if (sscanf(buf, "%x", &clear_flag) != 1)
159 return -EFAULT;
160 iwl_legacy_clear_traffic_stats(priv);
161
162 return count;
163}
164
/*
 * debugfs read handler: dump the driver-side RX statistics — per-type
 * management and control frame counters plus the data frame count and
 * byte total — as formatted text.
 *
 * Returns the number of bytes copied to user space, or -ENOMEM if the
 * temporary buffer cannot be allocated.
 */
static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char *buf;
	int pos = 0;
	int cnt;
	ssize_t ret;
	/* 100 bytes of header slack plus ~50 chars per counter line */
	const size_t bufsz = 100 +
		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_mgmt_string(cnt),
				 priv->rx_stats.mgmt[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_ctrl_string(cnt),
				 priv->rx_stats.ctrl[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
			 priv->rx_stats.data_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
			 priv->rx_stats.data_bytes);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
204
205#define BYTE1_MASK 0x000000ff;
206#define BYTE2_MASK 0x0000ffff;
207#define BYTE3_MASK 0x00ffffff;
208static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
209 char __user *user_buf,
210 size_t count, loff_t *ppos)
211{
212 u32 val;
213 char *buf;
214 ssize_t ret;
215 int i;
216 int pos = 0;
217 struct iwl_priv *priv = file->private_data;
218 size_t bufsz;
219
220 /* default is to dump the entire data segment */
221 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
222 priv->dbgfs_sram_offset = 0x800000;
223 if (priv->ucode_type == UCODE_INIT)
224 priv->dbgfs_sram_len = priv->ucode_init_data.len;
225 else
226 priv->dbgfs_sram_len = priv->ucode_data.len;
227 }
228 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
229 buf = kmalloc(bufsz, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
232 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
233 priv->dbgfs_sram_len);
234 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 priv->dbgfs_sram_offset);
236 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
237 val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
238 priv->dbgfs_sram_len - i);
239 if (i < 4) {
240 switch (i) {
241 case 1:
242 val &= BYTE1_MASK;
243 break;
244 case 2:
245 val &= BYTE2_MASK;
246 break;
247 case 3:
248 val &= BYTE3_MASK;
249 break;
250 }
251 }
252 if (!(i % 16))
253 pos += scnprintf(buf + pos, bufsz - pos, "\n");
254 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
255 }
256 pos += scnprintf(buf + pos, bufsz - pos, "\n");
257
258 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
259 kfree(buf);
260 return ret;
261}
262
263static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
264 const char __user *user_buf,
265 size_t count, loff_t *ppos)
266{
267 struct iwl_priv *priv = file->private_data;
268 char buf[64];
269 int buf_size;
270 u32 offset, len;
271
272 memset(buf, 0, sizeof(buf));
273 buf_size = min(count, sizeof(buf) - 1);
274 if (copy_from_user(buf, user_buf, buf_size))
275 return -EFAULT;
276
277 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
278 priv->dbgfs_sram_offset = offset;
279 priv->dbgfs_sram_len = len;
280 } else {
281 priv->dbgfs_sram_offset = 0;
282 priv->dbgfs_sram_len = 0;
283 }
284
285 return count;
286}
287
/*
 * debugfs read handler: list every station entry currently in use in
 * the driver's station table, including per-TID aggregation state
 * (sequence number, TX queue, frame counts, BA bitmap, rate flags).
 *
 * Returns bytes copied to user space, or -ENOMEM on allocation
 * failure.
 */
static ssize_t
iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
			size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_station_entry *station;
	int max_sta = priv->hw_params.max_stations;
	char *buf;
	int i, j, pos = 0;
	ssize_t ret;
	/* Add 30 for initial string */
	const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
			priv->num_stations);

	for (i = 0; i < max_sta; i++) {
		station = &priv->stations[i];
		/* table is sparse: skip slots not marked in-use */
		if (!station->used)
			continue;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "station %d - addr: %pM, flags: %#x\n",
				 i, station->sta.sta.addr,
				 station->sta.station_flags_msk);
		pos += scnprintf(buf + pos, bufsz - pos,
				"TID\tseq_num\ttxq_id\tframes\ttfds\t");
		pos += scnprintf(buf + pos, bufsz - pos,
				"start_idx\tbitmap\t\t\trate_n_flags\n");

		for (j = 0; j < MAX_TID_COUNT; j++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
				j, station->tid[j].seq_number,
				station->tid[j].agg.txq_id,
				station->tid[j].agg.frame_count,
				station->tid[j].tfds_in_queue,
				station->tid[j].agg.start_idx,
				station->tid[j].agg.bitmap,
				station->tid[j].agg.rate_n_flags);

			if (station->tid[j].agg.wait_for_ba)
				pos += scnprintf(buf + pos, bufsz - pos,
						 " - waitforba");
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		}

		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
345
/*
 * debugfs read handler: hex-dump the cached EEPROM/NVM image, 16 bytes
 * per line, preceded by the EEPROM version word.
 *
 * Returns bytes copied to user space, -ENODATA if the NVM size is not
 * a multiple of the 16-byte dump stride, or -ENOMEM on allocation
 * failure (also returned — arguably misleadingly — when the EEPROM
 * image pointer is NULL).
 */
static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
				       char __user *user_buf,
				       size_t count,
				       loff_t *ppos)
{
	ssize_t ret;
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0, buf_size = 0;
	const u8 *ptr;
	char *buf;
	u16 eeprom_ver;
	size_t eeprom_len = priv->cfg->base_params->eeprom_size;
	/* ~4 output chars per EEPROM byte plus header slack */
	buf_size = 4 * eeprom_len + 256;

	if (eeprom_len % 16) {
		IWL_ERR(priv, "NVM size is not multiple of 16.\n");
		return -ENODATA;
	}

	ptr = priv->eeprom;
	if (!ptr) {
		IWL_ERR(priv, "Invalid EEPROM memory\n");
		return -ENOMEM;
	}

	/* 4 characters for byte 0xYY */
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}
	eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
	pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
			"version: 0x%x\n", eeprom_ver);
	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
				   buf_size - pos, 0);
		/* hex_dump_to_buffer() doesn't report length; measure it */
		pos += strlen(buf + pos);
		if (buf_size - pos > 0)
			buf[pos++] = '\n';
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
393
394static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
395 char __user *user_buf,
396 size_t count, loff_t *ppos)
397{
398 struct iwl_priv *priv = file->private_data;
399 char *buf;
400 int pos = 0;
401 ssize_t ret = -ENOMEM;
402
403 ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
404 priv, true, &buf, true);
405 if (buf) {
406 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
407 kfree(buf);
408 }
409 return ret;
410}
411
412static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
413 const char __user *user_buf,
414 size_t count, loff_t *ppos)
415{
416 struct iwl_priv *priv = file->private_data;
417 u32 event_log_flag;
418 char buf[8];
419 int buf_size;
420
421 memset(buf, 0, sizeof(buf));
422 buf_size = min(count, sizeof(buf) - 1);
423 if (copy_from_user(buf, user_buf, buf_size))
424 return -EFAULT;
425 if (sscanf(buf, "%d", &event_log_flag) != 1)
426 return -EFAULT;
427 if (event_log_flag == 1)
428 priv->cfg->ops->lib->dump_nic_event_log(priv, true,
429 NULL, false);
430
431 return count;
432}
433
434
435
/*
 * debugfs read handler: list the channels of the 2.4GHz and 5.2GHz
 * bands with their max TX power and regulatory/scan capability flags
 * (radar/802.11h, IBSS allowed, passive-only scanning).
 *
 * Returns bytes copied to user space, -EAGAIN if channel geography is
 * not configured yet, or -ENOMEM on allocation failure.
 */
static ssize_t
iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
			 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct ieee80211_channel *channels = NULL;
	const struct ieee80211_supported_band *supp_band = NULL;
	int pos = 0, i, bufsz = PAGE_SIZE;
	char *buf;
	ssize_t ret;

	if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
	if (supp_band) {
		channels = supp_band->channels;

		pos += scnprintf(buf + pos, bufsz - pos,
				"Displaying %d channels in 2.4GHz band 802.11bg):\n",
				supp_band->n_channels);

		for (i = 0; i < supp_band->n_channels; i++)
			pos += scnprintf(buf + pos, bufsz - pos,
					"%d: %ddBm: BSS%s%s, %s.\n",
					channels[i].hw_value,
					channels[i].max_power,
					/* radar channels require 802.11h */
					channels[i].flags & IEEE80211_CHAN_RADAR ?
					" (IEEE 802.11h required)" : "",
					/* IBSS only on non-radar channels */
					((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
					|| (channels[i].flags &
					IEEE80211_CHAN_RADAR)) ? "" :
					", IBSS",
					channels[i].flags &
					IEEE80211_CHAN_PASSIVE_SCAN ?
					"passive only" : "active/passive");
	}
	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
	if (supp_band) {
		channels = supp_band->channels;

		pos += scnprintf(buf + pos, bufsz - pos,
				"Displaying %d channels in 5.2GHz band (802.11a)\n",
				supp_band->n_channels);

		for (i = 0; i < supp_band->n_channels; i++)
			pos += scnprintf(buf + pos, bufsz - pos,
					"%d: %ddBm: BSS%s%s, %s.\n",
					channels[i].hw_value,
					channels[i].max_power,
					channels[i].flags & IEEE80211_CHAN_RADAR ?
					" (IEEE 802.11h required)" : "",
					((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
					|| (channels[i].flags &
					IEEE80211_CHAN_RADAR)) ? "" :
					", IBSS",
					channels[i].flags &
					IEEE80211_CHAN_PASSIVE_SCAN ?
					"passive only" : "active/passive");
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
506
/*
 * debugfs read handler: print the current value of every driver
 * STATUS_* bit as a "name: 0/1" line.  Uses an on-stack buffer, so it
 * cannot fail with -ENOMEM.
 */
static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[512];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
		test_bit(STATUS_HCMD_ACTIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
		test_bit(STATUS_INT_ENABLED, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
		test_bit(STATUS_RF_KILL_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
		test_bit(STATUS_CT_KILL, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
		test_bit(STATUS_INIT, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
		test_bit(STATUS_ALIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
		test_bit(STATUS_READY, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
		test_bit(STATUS_TEMPERATURE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
		test_bit(STATUS_GEO_CONFIGURED, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
		test_bit(STATUS_EXIT_PENDING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
		test_bit(STATUS_STATISTICS, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
		test_bit(STATUS_SCANNING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
		test_bit(STATUS_SCAN_ABORTING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
		test_bit(STATUS_SCAN_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
		test_bit(STATUS_POWER_PMI, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
		test_bit(STATUS_FW_ERROR, &priv->status));
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
550
/*
 * debugfs read handler: print the interrupt statistics report —
 * HW/SW error counts (with the last restart error code if any errors
 * occurred), RF-kill/CT-kill/wakeup counts, per-command RX handler
 * counts, TX/FH interrupts and unhandled INTA count.
 *
 * Returns bytes copied to user space, or -ENOMEM on allocation
 * failure.
 */
static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		priv->isr_stats.hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		priv->isr_stats.sw);
	if (priv->isr_stats.sw || priv->isr_stats.hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code: 0x%X\n",
			priv->isr_stats.err_code);
	}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		priv->isr_stats.sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		priv->isr_stats.alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n",
		priv->isr_stats.rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		priv->isr_stats.ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		priv->isr_stats.wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n",
		priv->isr_stats.rx);
	/* only print RX handlers that have actually fired */
	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
		if (priv->isr_stats.rx_handlers[cnt] > 0)
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tRx handler[%36s]:\t\t %u\n",
				iwl_legacy_get_cmd_string(cnt),
				priv->isr_stats.rx_handlers[cnt]);
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		priv->isr_stats.tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		priv->isr_stats.unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
617
618static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
619 const char __user *user_buf,
620 size_t count, loff_t *ppos)
621{
622 struct iwl_priv *priv = file->private_data;
623 char buf[8];
624 int buf_size;
625 u32 reset_flag;
626
627 memset(buf, 0, sizeof(buf));
628 buf_size = min(count, sizeof(buf) - 1);
629 if (copy_from_user(buf, user_buf, buf_size))
630 return -EFAULT;
631 if (sscanf(buf, "%x", &reset_flag) != 1)
632 return -EFAULT;
633 if (reset_flag == 0)
634 iwl_legacy_clear_isr_stats(priv);
635
636 return count;
637}
638
/*
 * debugfs read handler: print the EDCA/QoS parameters (cw_min, cw_max,
 * aifsn, txop) for each access category of every RXON context.  Uses
 * an on-stack buffer sized per context, so it cannot fail with
 * -ENOMEM.
 */
static ssize_t
iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_rxon_context *ctx;
	int pos = 0, i;
	char buf[256 * NUM_IWL_RXON_CTX];
	const size_t bufsz = sizeof(buf);

	for_each_context(priv, ctx) {
		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
				 ctx->ctxid);
		for (i = 0; i < AC_NUM; i++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tcw_min\tcw_max\taifsn\ttxop\n");
			pos += scnprintf(buf + pos, bufsz - pos,
				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
				ctx->qos_data.def_qos_parm.ac[i].cw_min,
				ctx->qos_data.def_qos_parm.ac[i].cw_max,
				ctx->qos_data.def_qos_parm.ac[i].aifsn,
				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
		}
		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
666
667static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
668 const char __user *user_buf,
669 size_t count, loff_t *ppos)
670{
671 struct iwl_priv *priv = file->private_data;
672 char buf[8];
673 int buf_size;
674 int ht40;
675
676 memset(buf, 0, sizeof(buf));
677 buf_size = min(count, sizeof(buf) - 1);
678 if (copy_from_user(buf, user_buf, buf_size))
679 return -EFAULT;
680 if (sscanf(buf, "%d", &ht40) != 1)
681 return -EFAULT;
682 if (!iwl_legacy_is_any_associated(priv))
683 priv->disable_ht40 = ht40 ? true : false;
684 else {
685 IWL_ERR(priv, "Sta associated with AP - "
686 "Change to 40MHz channel support is not allowed\n");
687 return -EINVAL;
688 }
689
690 return count;
691}
692
693static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
694 char __user *user_buf,
695 size_t count, loff_t *ppos)
696{
697 struct iwl_priv *priv = file->private_data;
698 char buf[100];
699 int pos = 0;
700 const size_t bufsz = sizeof(buf);
701
702 pos += scnprintf(buf + pos, bufsz - pos,
703 "11n 40MHz Mode: %s\n",
704 priv->disable_ht40 ? "Disabled" : "Enabled");
705 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
706}
707
/*
 * Instantiate the file_operations structures for the handlers defined
 * above (read-only vs. read/write as each debugfs entry requires).
 */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
717
/*
 * debugfs read handler: dump the TX/RX queue pointers and — when the
 * corresponding debug level (IWL_DL_TX / IWL_DL_RX) is enabled and a
 * capture buffer exists — a hex dump of the captured traffic entries.
 *
 * Returns bytes copied to user space, -EAGAIN if the TX queues are
 * not allocated yet, or -ENOMEM on allocation failure.
 */
static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_rx_queue *rxq = &priv->rxq;
	char *buf;
	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
		(priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	if (!priv->txq) {
		IWL_ERR(priv, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"q[%d]: read_ptr: %u, write_ptr: %u\n",
				cnt, q->read_ptr, q->write_ptr);
	}
	if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
		ptr = priv->tx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			/* 16 bytes of each captured entry per line */
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++,  ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"read: %u, write: %u\n",
			 rxq->read, rxq->write);

	if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
		ptr = priv->rx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++,  ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
796
797static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
798 const char __user *user_buf,
799 size_t count, loff_t *ppos)
800{
801 struct iwl_priv *priv = file->private_data;
802 char buf[8];
803 int buf_size;
804 int traffic_log;
805
806 memset(buf, 0, sizeof(buf));
807 buf_size = min(count, sizeof(buf) - 1);
808 if (copy_from_user(buf, user_buf, buf_size))
809 return -EFAULT;
810 if (sscanf(buf, "%d", &traffic_log) != 1)
811 return -EFAULT;
812 if (traffic_log == 0)
813 iwl_legacy_reset_traffic_log(priv);
814
815 return count;
816}
817
818static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
819 char __user *user_buf,
820 size_t count, loff_t *ppos) {
821
822 struct iwl_priv *priv = file->private_data;
823 struct iwl_tx_queue *txq;
824 struct iwl_queue *q;
825 char *buf;
826 int pos = 0;
827 int cnt;
828 int ret;
829 const size_t bufsz = sizeof(char) * 64 *
830 priv->cfg->base_params->num_of_queues;
831
832 if (!priv->txq) {
833 IWL_ERR(priv, "txq not ready\n");
834 return -EAGAIN;
835 }
836 buf = kzalloc(bufsz, GFP_KERNEL);
837 if (!buf)
838 return -ENOMEM;
839
840 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
841 txq = &priv->txq[cnt];
842 q = &txq->q;
843 pos += scnprintf(buf + pos, bufsz - pos,
844 "hwq %.2d: read=%u write=%u stop=%d"
845 " swq_id=%#.2x (ac %d/hwq %d)\n",
846 cnt, q->read_ptr, q->write_ptr,
847 !!test_bit(cnt, priv->queue_stopped),
848 txq->swq_id, txq->swq_id & 3,
849 (txq->swq_id >> 2) & 0x1f);
850 if (cnt >= 4)
851 continue;
852 /* for the ACs, display the stop count too */
853 pos += scnprintf(buf + pos, bufsz - pos,
854 " stop-count: %d\n",
855 atomic_read(&priv->queue_stop_count[cnt]));
856 }
857 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
858 kfree(buf);
859 return ret;
860}
861
862static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
863 char __user *user_buf,
864 size_t count, loff_t *ppos) {
865
866 struct iwl_priv *priv = file->private_data;
867 struct iwl_rx_queue *rxq = &priv->rxq;
868 char buf[256];
869 int pos = 0;
870 const size_t bufsz = sizeof(buf);
871
872 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
873 rxq->read);
874 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
875 rxq->write);
876 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
877 rxq->free_count);
878 if (rxq->rb_stts) {
879 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
880 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
881 } else {
882 pos += scnprintf(buf + pos, bufsz - pos,
883 "closed_rb_num: Not Allocated\n");
884 }
885 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
886}
887
888static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
889 char __user *user_buf,
890 size_t count, loff_t *ppos)
891{
892 struct iwl_priv *priv = file->private_data;
893 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
894 user_buf, count, ppos);
895}
896
897static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
898 char __user *user_buf,
899 size_t count, loff_t *ppos)
900{
901 struct iwl_priv *priv = file->private_data;
902 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
903 user_buf, count, ppos);
904}
905
906static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
907 char __user *user_buf,
908 size_t count, loff_t *ppos)
909{
910 struct iwl_priv *priv = file->private_data;
911 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
912 user_buf, count, ppos);
913}
914
/*
 * debugfs read handler: dump the receiver sensitivity calibration
 * state — auto-correlation thresholds for OFDM/CCK, false-alarm and
 * bad-PLCP counters, and the energy/silence measurement history.
 *
 * Returns bytes copied to user space, or -ENOMEM on allocation
 * failure.
 */
static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
	ssize_t ret;
	struct iwl_sensitivity_data *data;

	data = &priv->sensitivity_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
			data->auto_corr_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			"auto_corr_ofdm_mrc:\t\t %u\n",
			data->auto_corr_ofdm_mrc);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
			data->auto_corr_ofdm_x1);
	pos += scnprintf(buf + pos, bufsz - pos,
			"auto_corr_ofdm_mrc_x1:\t\t %u\n",
			data->auto_corr_ofdm_mrc_x1);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
			data->auto_corr_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
			data->auto_corr_cck_mrc);
	pos += scnprintf(buf + pos, bufsz - pos,
			"last_bad_plcp_cnt_ofdm:\t\t %u\n",
			data->last_bad_plcp_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
			data->last_fa_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			"last_bad_plcp_cnt_cck:\t\t %u\n",
			data->last_bad_plcp_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
			data->last_fa_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
			data->nrg_curr_state);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
			data->nrg_prev_state);
	/* energy measurement history, most recent entries */
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
	for (cnt = 0; cnt < 10; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->nrg_value[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->nrg_silence_rssi[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
			data->nrg_silence_ref);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
			data->nrg_energy_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
			data->nrg_silence_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
			data->nrg_th_cck);
	pos += scnprintf(buf + pos, bufsz - pos,
			"nrg_auto_corr_silence_diff:\t %u\n",
			data->nrg_auto_corr_silence_diff);
	pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
			data->num_in_cck_no_fa);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
			data->nrg_th_ofdm);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
994
995
/*
 * debugfs read handler: dump the chain-noise calibration state —
 * active chain mask, per-chain noise and signal measurements, beacon
 * count, disconnect/gain arrays and the calibration state machine.
 *
 * Returns bytes copied to user space, or -ENOMEM on allocation
 * failure.
 */
static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct iwl_chain_noise_data *data;

	data = &priv->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
			data->active_chains);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
			data->chain_noise_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
			data->chain_noise_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
			data->chain_noise_c);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
			data->chain_signal_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
			data->chain_signal_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
			data->chain_signal_c);
	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
			data->beacon_count);

	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
			data->radio_write);
	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
			data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1053
1054static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1055 char __user *user_buf,
1056 size_t count, loff_t *ppos)
1057{
1058 struct iwl_priv *priv = file->private_data;
1059 char buf[60];
1060 int pos = 0;
1061 const size_t bufsz = sizeof(buf);
1062 u32 pwrsave_status;
1063
1064 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1065 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1066
1067 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1068 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1069 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1070 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1071 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1072 "error");
1073
1074 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1075}
1076
/*
 * debugfs write handler: ask the uCode (synchronously, with the clear
 * flag set) for a fresh statistics snapshot, resetting its counters.
 *
 * NOTE(review): the parsed 'clear' value only validates the input
 * format and is otherwise unused — the statistics request is sent for
 * any well-formed decimal input.  Confirm this is intentional.
 */
static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int clear;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &clear) != 1)
		return -EFAULT;

	/* make request to uCode to retrieve statistics information */
	mutex_lock(&priv->mutex);
	iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
	mutex_unlock(&priv->mutex);

	return count;
}
1100
/*
 * debugfs read handler: report whether the uCode trace timer is
 * running and the event-log wrap counters.  Uses an on-stack buffer.
 */
static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char buf[128];
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
			priv->event_log.ucode_trace ? "On" : "Off");
	pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
			priv->event_log.non_wraps_count);
	pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
			priv->event_log.wraps_once_count);
	pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
			priv->event_log.wraps_more_count);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
1121
1122static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
1123 const char __user *user_buf,
1124 size_t count, loff_t *ppos)
1125{
1126 struct iwl_priv *priv = file->private_data;
1127 char buf[8];
1128 int buf_size;
1129 int trace;
1130
1131 memset(buf, 0, sizeof(buf));
1132 buf_size = min(count, sizeof(buf) - 1);
1133 if (copy_from_user(buf, user_buf, buf_size))
1134 return -EFAULT;
1135 if (sscanf(buf, "%d", &trace) != 1)
1136 return -EFAULT;
1137
1138 if (trace) {
1139 priv->event_log.ucode_trace = true;
1140 /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
1141 mod_timer(&priv->ucode_trace,
1142 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
1143 } else {
1144 priv->event_log.ucode_trace = false;
1145 del_timer_sync(&priv->ucode_trace);
1146 }
1147
1148 return count;
1149}
1150
1151static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1152 char __user *user_buf,
1153 size_t count, loff_t *ppos) {
1154
1155 struct iwl_priv *priv = file->private_data;
1156 int len = 0;
1157 char buf[20];
1158
1159 len = sprintf(buf, "0x%04X\n",
1160 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1161 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1162}
1163
1164static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1165 char __user *user_buf,
1166 size_t count, loff_t *ppos) {
1167
1168 struct iwl_priv *priv = file->private_data;
1169 int len = 0;
1170 char buf[20];
1171
1172 len = sprintf(buf, "0x%04X\n",
1173 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1174 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1175}
1176
1177static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1178 char __user *user_buf,
1179 size_t count, loff_t *ppos)
1180{
1181 struct iwl_priv *priv = file->private_data;
1182 char *buf;
1183 int pos = 0;
1184 ssize_t ret = -EFAULT;
1185
1186 if (priv->cfg->ops->lib->dump_fh) {
1187 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1188 if (buf) {
1189 ret = simple_read_from_buffer(user_buf,
1190 count, ppos, buf, pos);
1191 kfree(buf);
1192 }
1193 }
1194
1195 return ret;
1196}
1197
1198static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1199 char __user *user_buf,
1200 size_t count, loff_t *ppos) {
1201
1202 struct iwl_priv *priv = file->private_data;
1203 int pos = 0;
1204 char buf[12];
1205 const size_t bufsz = sizeof(buf);
1206
1207 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1208 priv->missed_beacon_threshold);
1209
1210 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1211}
1212
1213static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1214 const char __user *user_buf,
1215 size_t count, loff_t *ppos)
1216{
1217 struct iwl_priv *priv = file->private_data;
1218 char buf[8];
1219 int buf_size;
1220 int missed;
1221
1222 memset(buf, 0, sizeof(buf));
1223 buf_size = min(count, sizeof(buf) - 1);
1224 if (copy_from_user(buf, user_buf, buf_size))
1225 return -EFAULT;
1226 if (sscanf(buf, "%d", &missed) != 1)
1227 return -EINVAL;
1228
1229 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1230 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1231 priv->missed_beacon_threshold =
1232 IWL_MISSED_BEACON_THRESHOLD_DEF;
1233 else
1234 priv->missed_beacon_threshold = missed;
1235
1236 return count;
1237}
1238
1239static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
1240 char __user *user_buf,
1241 size_t count, loff_t *ppos) {
1242
1243 struct iwl_priv *priv = file->private_data;
1244 int pos = 0;
1245 char buf[12];
1246 const size_t bufsz = sizeof(buf);
1247
1248 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
1249 priv->cfg->base_params->plcp_delta_threshold);
1250
1251 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1252}
1253
1254static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
1255 const char __user *user_buf,
1256 size_t count, loff_t *ppos) {
1257
1258 struct iwl_priv *priv = file->private_data;
1259 char buf[8];
1260 int buf_size;
1261 int plcp;
1262
1263 memset(buf, 0, sizeof(buf));
1264 buf_size = min(count, sizeof(buf) - 1);
1265 if (copy_from_user(buf, user_buf, buf_size))
1266 return -EFAULT;
1267 if (sscanf(buf, "%d", &plcp) != 1)
1268 return -EINVAL;
1269 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
1270 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
1271 priv->cfg->base_params->plcp_delta_threshold =
1272 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
1273 else
1274 priv->cfg->base_params->plcp_delta_threshold = plcp;
1275 return count;
1276}
1277
1278static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
1279 char __user *user_buf,
1280 size_t count, loff_t *ppos) {
1281
1282 struct iwl_priv *priv = file->private_data;
1283 int i, pos = 0;
1284 char buf[300];
1285 const size_t bufsz = sizeof(buf);
1286 struct iwl_force_reset *force_reset;
1287
1288 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
1289 force_reset = &priv->force_reset[i];
1290 pos += scnprintf(buf + pos, bufsz - pos,
1291 "Force reset method %d\n", i);
1292 pos += scnprintf(buf + pos, bufsz - pos,
1293 "\tnumber of reset request: %d\n",
1294 force_reset->reset_request_count);
1295 pos += scnprintf(buf + pos, bufsz - pos,
1296 "\tnumber of reset request success: %d\n",
1297 force_reset->reset_success_count);
1298 pos += scnprintf(buf + pos, bufsz - pos,
1299 "\tnumber of reset request reject: %d\n",
1300 force_reset->reset_reject_count);
1301 pos += scnprintf(buf + pos, bufsz - pos,
1302 "\treset duration: %lu\n",
1303 force_reset->reset_duration);
1304 }
1305 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1306}
1307
1308static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1309 const char __user *user_buf,
1310 size_t count, loff_t *ppos) {
1311
1312 struct iwl_priv *priv = file->private_data;
1313 char buf[8];
1314 int buf_size;
1315 int reset, ret;
1316
1317 memset(buf, 0, sizeof(buf));
1318 buf_size = min(count, sizeof(buf) - 1);
1319 if (copy_from_user(buf, user_buf, buf_size))
1320 return -EFAULT;
1321 if (sscanf(buf, "%d", &reset) != 1)
1322 return -EINVAL;
1323 switch (reset) {
1324 case IWL_RF_RESET:
1325 case IWL_FW_RESET:
1326 ret = iwl_legacy_force_reset(priv, reset, true);
1327 break;
1328 default:
1329 return -EINVAL;
1330 }
1331 return ret ? ret : count;
1332}
1333
1334static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
1335 const char __user *user_buf,
1336 size_t count, loff_t *ppos) {
1337
1338 struct iwl_priv *priv = file->private_data;
1339 char buf[8];
1340 int buf_size;
1341 int timeout;
1342
1343 memset(buf, 0, sizeof(buf));
1344 buf_size = min(count, sizeof(buf) - 1);
1345 if (copy_from_user(buf, user_buf, buf_size))
1346 return -EFAULT;
1347 if (sscanf(buf, "%d", &timeout) != 1)
1348 return -EINVAL;
1349 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1350 timeout = IWL_DEF_WD_TIMEOUT;
1351
1352 priv->cfg->base_params->wd_timeout = timeout;
1353 iwl_legacy_setup_watchdog(priv);
1354 return count;
1355}
1356
/*
 * Instantiate a struct file_operations for each debugfs entry.  The
 * DEBUGFS_{READ,WRITE,READ_WRITE}_FILE_OPS macros bind the matching
 * iwl_legacy_dbgfs_<name>_{read,write} handlers defined above.
 */
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1378
/*
 * Create the debugfs files and directories
 *
 * Builds <wiphy debugfs>/<name>/{data,rf,debug} and populates them.
 * On any failure everything created so far is removed and -ENOMEM is
 * returned.
 */
int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
{
	struct dentry *phyd = priv->hw->wiphy->debugfsdir;
	struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;

	dir_drv = debugfs_create_dir(name, phyd);
	if (!dir_drv)
		return -ENOMEM;

	/* record the root first so the error path can clean up recursively */
	priv->debugfs_dir = dir_drv;

	dir_data = debugfs_create_dir("data", dir_drv);
	if (!dir_data)
		goto err;
	dir_rf = debugfs_create_dir("rf", dir_drv);
	if (!dir_rf)
		goto err;
	dir_debug = debugfs_create_dir("debug", dir_drv);
	if (!dir_debug)
		goto err;

	/* NOTE(review): DEBUGFS_ADD_FILE/ADD_BOOL presumably jump to err
	 * on failure -- macro bodies are not visible here; confirm. */
	DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
	DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);

	/* calibration entries only exist when the driver does the calib */
	if (priv->cfg->base_params->sensitivity_calib_by_driver)
		DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
	if (priv->cfg->base_params->chain_noise_calib_by_driver)
		DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
	if (priv->cfg->base_params->ucode_tracing)
		DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
	/* NOTE(review): rxon_flags / rxon_filter_flags only have read
	 * handlers yet are registered write-only (S_IWUSR) -- looks
	 * inverted; confirm intent before changing permissions. */
	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
	if (priv->cfg->base_params->sensitivity_calib_by_driver)
		DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
				 &priv->disable_sens_cal);
	if (priv->cfg->base_params->chain_noise_calib_by_driver)
		DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
				 &priv->disable_chain_noise_cal);
	DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
			 &priv->disable_tx_power_cal);
	return 0;

err:
	IWL_ERR(priv, "Can't create the debugfs directory\n");
	iwl_legacy_dbgfs_unregister(priv);
	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1454
1455/**
1456 * Remove the debugfs files and directories
1457 *
1458 */
1459void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
1460{
1461 if (!priv->debugfs_dir)
1462 return;
1463
1464 debugfs_remove_recursive(priv->debugfs_dir);
1465 priv->debugfs_dir = NULL;
1466}
1467EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 000000000000..9ee849d669f3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1426 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <linux/leds.h>
38#include <linux/wait.h>
39#include <net/ieee80211_radiotap.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-debug.h"
46#include "iwl-4965-hw.h"
47#include "iwl-3945-hw.h"
48#include "iwl-led.h"
49#include "iwl-power.h"
50#include "iwl-legacy-rs.h"
51
52struct iwl_tx_queue;
53
54/* CT-KILL constants */
55#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
56
57/* Default noise level to report when noise measurement is not available.
58 * This may be because we're:
59 * 1) Not associated (4965, no beacon statistics being sent to driver)
60 * 2) Scanning (noise measurement does not apply to associated channel)
61 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
62 * Use default noise value of -127 ... this is below the range of measurable
63 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
64 * Also, -127 works better than 0 when averaging frames with/without
65 * noise info (e.g. averaging might be done in app); measured dBm values are
66 * always negative ... using a negative value as the default keeps all
67 * averages within an s8's (used in some apps) range of negative values. */
68#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
69
70/*
71 * RTS threshold here is total size [2347] minus 4 FCS bytes
72 * Per spec:
73 * a value of 0 means RTS on all data/management packets
74 * a value > max MSDU size means no RTS
75 * else RTS for data/management frames where MPDU is larger
76 * than RTS value.
77 */
78#define DEFAULT_RTS_THRESHOLD 2347U
79#define MIN_RTS_THRESHOLD 0U
80#define MAX_RTS_THRESHOLD 2347U
81#define MAX_MSDU_SIZE 2304U
82#define MAX_MPDU_SIZE 2346U
83#define DEFAULT_BEACON_INTERVAL 100U
84#define DEFAULT_SHORT_RETRY_LIMIT 7U
85#define DEFAULT_LONG_RETRY_LIMIT 4U
86
/*
 * One receive buffer: a page mapped for device DMA plus its bus
 * address.  Buffers are moved between the Rx queue's rx_free and
 * rx_used lists via @list.
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;	/* bus address of @page for device DMA */
	struct page *page;	/* host page backing this Rx buffer */
	struct list_head list;	/* linkage on rx_free / rx_used lists */
};

/* CPU-addressable pointer to an Rx buffer's payload */
#define rxb_addr(r) page_address(r->page)
94
95/* defined below */
96struct iwl_device_cmd;
97
/*
 * Driver-private metadata kept for each slot of the command queue.
 */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	/*
	 * only for ASYNC commands
	 * (which is somewhat stupid -- look at iwl-sta.c for instance
	 * which duplicates a bunch of code because the callback isn't
	 * invoked for SYNC commands, if it were and its result passed
	 * through it would be simpler...)
	 */
	void (*callback)(struct iwl_priv *priv,
			 struct iwl_device_cmd *cmd,
			 struct iwl_rx_packet *pkt);

	/* The CMD_SIZE_HUGE flag bit indicates that the command
	 * structure is stored at the end of the shared queue memory. */
	u32 flags;

	/* DMA unmap bookkeeping for this command's buffer */
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
119
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.  read_ptr/write_ptr are
 * indexes into the n_bd-entry circular buffer; low_mark/high_mark
 * drive resume/stop of the associated queue based on free space.
 */
struct iwl_queue {
	int n_bd;              /* number of BDs in this queue */
	int write_ptr;       /* 1-st empty entry (index) host_w*/
	int read_ptr;         /* last used entry (index) host_r*/
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;
	int low_mark;	       /* low watermark, resume queue if free
				* space more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space less than this */
} __packed;
138
139/* One for each TFD */
140struct iwl_tx_info {
141 struct sk_buff *skb;
142 struct iwl_rxon_context *ctx;
143};
144
/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @txb: array of per-TFD driver data
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @active: non-zero while the queue is in use -- TODO confirm exact
 *	semantics against the code that sets it
 * @swq_id: software queue identifier -- presumably the mac80211 queue
 *	mapping; confirm against callers
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_tx_queue {
	struct iwl_queue q;
	void *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_info *txb;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 active;
	u8 swq_id;
};
175
176#define IWL_NUM_SCAN_RATES (2)
177
178struct iwl4965_channel_tgd_info {
179 u8 type;
180 s8 max_power;
181};
182
183struct iwl4965_channel_tgh_info {
184 s64 last_radar_time;
185};
186
187#define IWL4965_MAX_RATE (33)
188
189struct iwl3945_clip_group {
190 /* maximum power level to prevent clipping for each rate, derived by
191 * us from this band's saturation power in EEPROM */
192 const s8 clip_powers[IWL_MAX_RATES];
193};
194
195/* current Tx power values to use, one for each rate for each channel.
196 * requested power is limited by:
197 * -- regulatory EEPROM limits for this channel
198 * -- hardware capabilities (clip-powers)
199 * -- spectrum management
200 * -- user preference (e.g. iwconfig)
201 * when requested power is set, base power index must also be set. */
202struct iwl3945_channel_power_info {
203 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
204 s8 power_table_index; /* actual (compenst'd) index into gain table */
205 s8 base_power_index; /* gain index for power at factory temp. */
206 s8 requested_power; /* power (dBm) requested for this chnl/rate */
207};
208
209/* current scan Tx power values to use, one for each scan rate for each
210 * channel. */
211struct iwl3945_scan_power_info {
212 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
213 s8 power_table_index; /* actual (compenst'd) index into gain table */
214 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
215};
216
217/*
218 * One for each channel, holds all channel setup data
219 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
220 * with one another!
221 */
222struct iwl_channel_info {
223 struct iwl4965_channel_tgd_info tgd;
224 struct iwl4965_channel_tgh_info tgh;
225 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
226 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
227 * HT40 channel */
228
229 u8 channel; /* channel number */
230 u8 flags; /* flags copied from EEPROM */
231 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
232 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
233 s8 min_power; /* always 0 */
234 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
235
236 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
237 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
238 enum ieee80211_band band;
239
240 /* HT40 channel info */
241 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
242 u8 ht40_flags; /* flags copied from EEPROM */
243 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
244
245 /* Radio/DSP gain settings for each "normal" data Tx rate.
246 * These include, in addition to RF and DSP gain, a few fields for
247 * remembering/modifying gain settings (indexes). */
248 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
249
250 /* Radio/DSP gain settings for each scan rate, for directed scans. */
251 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
252};
253
254#define IWL_TX_FIFO_BK 0 /* shared */
255#define IWL_TX_FIFO_BE 1
256#define IWL_TX_FIFO_VI 2 /* shared */
257#define IWL_TX_FIFO_VO 3
258#define IWL_TX_FIFO_UNUSED -1
259
260/* Minimum number of queues. MAX_NUM is defined in hw specific files.
261 * Set the minimum to accommodate the 4 standard TX queues, 1 command
262 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
263#define IWL_MIN_NUM_QUEUES 10
264
265#define IWL_DEFAULT_CMD_QUEUE_NUM 4
266
267#define IEEE80211_DATA_LEN 2304
268#define IEEE80211_4ADDR_LEN 30
269#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
270#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
271
272struct iwl_frame {
273 union {
274 struct ieee80211_hdr frame;
275 struct iwl_tx_beacon_cmd beacon;
276 u8 raw[IEEE80211_FRAME_LEN];
277 u8 cmd[360];
278 } u;
279 struct list_head list;
280};
281
282#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
283#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
284#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
285
286enum {
287 CMD_SYNC = 0,
288 CMD_SIZE_NORMAL = 0,
289 CMD_NO_SKB = 0,
290 CMD_SIZE_HUGE = (1 << 0),
291 CMD_ASYNC = (1 << 1),
292 CMD_WANT_SKB = (1 << 2),
293};
294
295#define DEF_CMD_PAYLOAD_SIZE 320
296
/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for a scan command
 * (which is relatively huge; space is allocated separately).
 */
struct iwl_device_cmd {
	struct iwl_cmd_header hdr;	/* uCode API command header */
	union {
		/* command payload; largest member bounds the allocation */
		u32 flags;
		u8 val8;
		u16 val16;
		u32 val32;
		struct iwl_tx_cmd tx;
		u8 payload[DEF_CMD_PAYLOAD_SIZE];
	} __packed cmd;
} __packed;
315
316#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
317
318
/*
 * Host command as submitted by the driver to the command queue.
 */
struct iwl_host_cmd {
	const void *data;		/* command payload (caller-owned) */
	unsigned long reply_page;	/* presumably the page holding the
					 * uCode reply when requested --
					 * TODO confirm against send path */
	void (*callback)(struct iwl_priv *priv,
			 struct iwl_device_cmd *cmd,
			 struct iwl_rx_packet *pkt);	/* ASYNC completion */
	u32 flags;	/* CMD_* flags (CMD_ASYNC, CMD_WANT_SKB, ...) */
	u16 len;	/* payload length in bytes */
	u8 id;		/* uCode command ID */
};
329
330#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
331#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
332#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
333
/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: backing storage for all Rx buffers (queue entries plus spares)
 * @queue: maps an rbd index to its iwl_rx_mem_buffer
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: write index last handed to the hardware -- TODO
 *	confirm exact semantics against the users of this field
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects queue state -- confirm exact coverage at use sites
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};
365
366#define IWL_SUPPORTED_RATES_IE_LEN 8
367
368#define MAX_TID_COUNT 9
369
370#define IWL_INVALID_RATE 0xFF
371#define IWL_INVALID_VALUE -1
372
/**
 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
 * @txq_id: Tx queue used for Tx attempt
 * @frame_count: # frames attempted by Tx command
 * @wait_for_ba: Expect block-ack before next Tx reply
 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
 * @bitmap: one bit for each frame pending ACK in the Tx window
 * @rate_n_flags: Rate at which Tx was attempted
 * @state: aggregation state machine (IWL_AGG_* / IWL_EMPTYING_* below)
 *
 * If REPLY_TX indicates that aggregation was attempted, driver must wait
 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
 * until block ack arrives.
 */
struct iwl_ht_agg {
	u16 txq_id;
	u16 frame_count;
	u16 wait_for_ba;
	u16 start_idx;
	u64 bitmap;
	u32 rate_n_flags;
#define IWL_AGG_OFF 0
#define IWL_AGG_ON 1
#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
#define IWL_EMPTYING_HW_QUEUE_DELBA 3
	u8 state;
};
400
401
402struct iwl_tid_data {
403 u16 seq_number; /* 4965 only */
404 u16 tfds_in_queue;
405 struct iwl_ht_agg agg;
406};
407
408struct iwl_hw_key {
409 u32 cipher;
410 int keylen;
411 u8 keyidx;
412 u8 key[32];
413};
414
415union iwl_ht_rate_supp {
416 u16 rates;
417 struct {
418 u8 siso_rate;
419 u8 mimo_rate;
420 };
421};
422
423#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
424#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
425#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
426#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
427#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
428#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
429#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
430
431/*
432 * Maximal MPDU density for TX aggregation
433 * 4 - 2us density
434 * 5 - 4us density
435 * 6 - 8us density
436 * 7 - 16us density
437 */
438#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
439#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
440#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
441#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
442#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
443#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
444#define CFG_HT_MPDU_DENSITY_MIN (0x1)
445
446struct iwl_ht_config {
447 bool single_chain_sufficient;
448 enum ieee80211_smps_mode smps; /* current smps mode */
449};
450
451/* QoS structures */
452struct iwl_qos_info {
453 int qos_active;
454 struct iwl_qosparam_cmd def_qos_parm;
455};
456
457/*
458 * Structure should be accessed with sta_lock held. When station addition
459 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
460 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
461 * sta_lock held.
462 */
463struct iwl_station_entry {
464 struct iwl_legacy_addsta_cmd sta;
465 struct iwl_tid_data tid[MAX_TID_COUNT];
466 u8 used, ctxid;
467 struct iwl_hw_key keyinfo;
468 struct iwl_link_quality_cmd *lq;
469};
470
471struct iwl_station_priv_common {
472 struct iwl_rxon_context *ctx;
473 u8 sta_id;
474};
475
476/*
477 * iwl_station_priv: Driver's private station information
478 *
479 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
480 * in the structure for use by driver. This structure is places in that
481 * space.
482 *
483 * The common struct MUST be first because it is shared between
484 * 3945 and 4965!
485 */
486struct iwl_station_priv {
487 struct iwl_station_priv_common common;
488 struct iwl_lq_sta lq_sta;
489 atomic_t pending_frames;
490 bool client;
491 bool asleep;
492};
493
494/**
495 * struct iwl_vif_priv - driver's private per-interface information
496 *
497 * When mac80211 allocates a virtual interface, it can allocate
498 * space for us to put data into.
499 */
500struct iwl_vif_priv {
501 struct iwl_rxon_context *ctx;
502 u8 ibss_bssid_sta_id;
503};
504
505/* one for each uCode image (inst/data, boot/init/runtime) */
506struct fw_desc {
507 void *v_addr; /* access by driver */
508 dma_addr_t p_addr; /* access by card's busmaster DMA */
509 u32 len; /* bytes */
510};
511
512/* uCode file layout */
513struct iwl_ucode_header {
514 __le32 ver; /* major/minor/API/serial */
515 struct {
516 __le32 inst_size; /* bytes of runtime code */
517 __le32 data_size; /* bytes of runtime data */
518 __le32 init_size; /* bytes of init code */
519 __le32 init_data_size; /* bytes of init data */
520 __le32 boot_size; /* bytes of bootstrap code */
521 u8 data[0]; /* in same order as sizes */
522 } v1;
523};
524
525struct iwl4965_ibss_seq {
526 u8 mac[ETH_ALEN];
527 u16 seq_num;
528 u16 frag_num;
529 unsigned long packet_time;
530 struct list_head list;
531};
532
533struct iwl_sensitivity_ranges {
534 u16 min_nrg_cck;
535 u16 max_nrg_cck;
536
537 u16 nrg_th_cck;
538 u16 nrg_th_ofdm;
539
540 u16 auto_corr_min_ofdm;
541 u16 auto_corr_min_ofdm_mrc;
542 u16 auto_corr_min_ofdm_x1;
543 u16 auto_corr_min_ofdm_mrc_x1;
544
545 u16 auto_corr_max_ofdm;
546 u16 auto_corr_max_ofdm_mrc;
547 u16 auto_corr_max_ofdm_x1;
548 u16 auto_corr_max_ofdm_mrc_x1;
549
550 u16 auto_corr_max_cck;
551 u16 auto_corr_max_cck_mrc;
552 u16 auto_corr_min_cck;
553 u16 auto_corr_min_cck_mrc;
554
555 u16 barker_corr_th_min;
556 u16 barker_corr_th_min_mrc;
557 u16 nrg_th_cca;
558};
559
560
561#define KELVIN_TO_CELSIUS(x) ((x)-273)
562#define CELSIUS_TO_KELVIN(x) ((x)+273)
563
564
565/**
566 * struct iwl_hw_params
567 * @max_txq_num: Max # Tx queues supported
568 * @dma_chnl_num: Number of Tx DMA/FIFO channels
569 * @scd_bc_tbls_size: size of scheduler byte count tables
570 * @tfd_size: TFD size
571 * @tx/rx_chains_num: Number of TX/RX chains
572 * @valid_tx/rx_ant: usable antennas
573 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
574 * @max_rxq_log: Log-base-2 of max_rxq_size
575 * @rx_page_order: Rx buffer page order
576 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
577 * @max_stations:
578 * @ht40_channel: is 40MHz width possible in band 2.4
579 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
580 * @sw_crypto: 0 for hw, 1 for sw
581 * @max_xxx_size: for ucode uses
582 * @ct_kill_threshold: temperature threshold
583 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
584 * @struct iwl_sensitivity_ranges: range of sensitivity values
585 */
586struct iwl_hw_params {
587 u8 max_txq_num;
588 u8 dma_chnl_num;
589 u16 scd_bc_tbls_size;
590 u32 tfd_size;
591 u8 tx_chains_num;
592 u8 rx_chains_num;
593 u8 valid_tx_ant;
594 u8 valid_rx_ant;
595 u16 max_rxq_size;
596 u16 max_rxq_log;
597 u32 rx_page_order;
598 u32 rx_wrt_ptr_reg;
599 u8 max_stations;
600 u8 ht40_channel;
601 u8 max_beacon_itrvl; /* in 1024 ms */
602 u32 max_inst_size;
603 u32 max_data_size;
604 u32 max_bsm_size;
605 u32 ct_kill_threshold; /* value in hw-dependent units */
606 u16 beacon_time_tsf_bits;
607 const struct iwl_sensitivity_ranges *sens;
608};
609
610
611/******************************************************************************
612 *
613 * Functions implemented in core module which are forward declared here
614 * for use by iwl-[4-5].c
615 *
616 * NOTE: The implementation of these functions are not hardware specific
617 * which is why they are in the core module files.
618 *
619 * Naming convention --
620 * iwl_ <-- Is part of iwlwifi
621 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
622 * iwl4965_bg_ <-- Called from work queue context
623 * iwl4965_mac_ <-- mac80211 callback
624 *
625 ****************************************************************************/
626extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
627extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
628extern int iwl_legacy_queue_space(const struct iwl_queue *q);
629static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
630{
631 return q->write_ptr >= q->read_ptr ?
632 (i >= q->read_ptr && i < q->write_ptr) :
633 !(i < q->read_ptr && i >= q->write_ptr);
634}
635
636
637static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
638 int is_huge)
639{
640 /*
641 * This is for init calibration result and scan command which
642 * required buffer > TFD_MAX_PAYLOAD_SIZE,
643 * the big buffer at end of command array
644 */
645 if (is_huge)
646 return q->n_window; /* must be power of 2 */
647
648 /* Otherwise, use normal size buffers */
649 return index & (q->n_window - 1);
650}
651
652
653struct iwl_dma_ptr {
654 dma_addr_t dma;
655 void *addr;
656 size_t size;
657};
658
659#define IWL_OPERATION_MODE_AUTO 0
660#define IWL_OPERATION_MODE_HT_ONLY 1
661#define IWL_OPERATION_MODE_MIXED 2
662#define IWL_OPERATION_MODE_20MHZ 3
663
664#define IWL_TX_CRC_SIZE 4
665#define IWL_TX_DELIMITER_SIZE 4
666
667#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
668
669/* Sensitivity and chain noise calibration */
670#define INITIALIZATION_VALUE 0xFFFF
671#define IWL4965_CAL_NUM_BEACONS 20
672#define IWL_CAL_NUM_BEACONS 16
673#define MAXIMUM_ALLOWED_PATHLOSS 15
674
675#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
676
677#define MAX_FA_OFDM 50
678#define MIN_FA_OFDM 5
679#define MAX_FA_CCK 50
680#define MIN_FA_CCK 5
681
682#define AUTO_CORR_STEP_OFDM 1
683
684#define AUTO_CORR_STEP_CCK 3
685#define AUTO_CORR_MAX_TH_CCK 160
686
687#define NRG_DIFF 2
688#define NRG_STEP_CCK 2
689#define NRG_MARGIN 8
690#define MAX_NUMBER_CCK_NO_FA 100
691
692#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
693
694#define CHAIN_A 0
695#define CHAIN_B 1
696#define CHAIN_C 2
697#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
698#define ALL_BAND_FILTER 0xFF00
699#define IN_BAND_FILTER 0xFF
700#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
701
702#define NRG_NUM_PREV_STAT_L 20
703#define NUM_RX_CHAINS 3
704
705enum iwl4965_false_alarm_state {
706 IWL_FA_TOO_MANY = 0,
707 IWL_FA_TOO_FEW = 1,
708 IWL_FA_GOOD_RANGE = 2,
709};
710
711enum iwl4965_chain_noise_state {
712 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
713 IWL_CHAIN_NOISE_ACCUMULATE,
714 IWL_CHAIN_NOISE_CALIBRATED,
715 IWL_CHAIN_NOISE_DONE,
716};
717
718enum iwl4965_calib_enabled_state {
719 IWL_CALIB_DISABLED = 0, /* must be 0 */
720 IWL_CALIB_ENABLED = 1,
721};
722
723/*
724 * enum iwl_calib
725 * defines the order in which results of initial calibrations
726 * should be sent to the runtime uCode
727 */
728enum iwl_calib {
729 IWL_CALIB_MAX,
730};
731
/*
 * Opaque calibration results, stored so the initial calibration data can
 * be replayed to the runtime uCode (see enum iwl_calib for ordering).
 */
struct iwl_calib_result {
	void *buf;		/* raw result payload */
	size_t buf_len;		/* payload length in bytes */
};
737
738enum ucode_type {
739 UCODE_NONE = 0,
740 UCODE_INIT,
741 UCODE_RT
742};
743
744/* Sensitivity calib data */
745struct iwl_sensitivity_data {
746 u32 auto_corr_ofdm;
747 u32 auto_corr_ofdm_mrc;
748 u32 auto_corr_ofdm_x1;
749 u32 auto_corr_ofdm_mrc_x1;
750 u32 auto_corr_cck;
751 u32 auto_corr_cck_mrc;
752
753 u32 last_bad_plcp_cnt_ofdm;
754 u32 last_fa_cnt_ofdm;
755 u32 last_bad_plcp_cnt_cck;
756 u32 last_fa_cnt_cck;
757
758 u32 nrg_curr_state;
759 u32 nrg_prev_state;
760 u32 nrg_value[10];
761 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
762 u32 nrg_silence_ref;
763 u32 nrg_energy_idx;
764 u32 nrg_silence_idx;
765 u32 nrg_th_cck;
766 s32 nrg_auto_corr_silence_diff;
767 u32 num_in_cck_no_fa;
768 u32 nrg_th_ofdm;
769
770 u16 barker_corr_th_min;
771 u16 barker_corr_th_min_mrc;
772 u16 nrg_th_cca;
773};
774
/* Chain noise (differential Rx gain) calib data */
struct iwl_chain_noise_data {
	u32 active_chains;			/* bitmap of Rx chains in use */
	/* per-chain noise and signal measurements — presumably accumulated
	 * over beacon_count beacons; confirm against the calib code */
	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_signal_a;
	u32 chain_signal_b;
	u32 chain_signal_c;
	u16 beacon_count;			/* beacons sampled so far */
	u8 disconn_array[NUM_RX_CHAINS];	/* nonzero = chain disconnected */
	u8 delta_gain_code[NUM_RX_CHAINS];	/* per-chain gain adjustment code */
	u8 radio_write;				/* gain already written to radio */
	u8 state;	/* calibration progress; values look like
			 * enum iwl4965_chain_noise_state — verify */
};
790
791#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
792#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
793
794#define IWL_TRAFFIC_ENTRIES (256)
795#define IWL_TRAFFIC_ENTRY_SIZE (64)
796
797enum {
798 MEASUREMENT_READY = (1 << 0),
799 MEASUREMENT_ACTIVE = (1 << 1),
800};
801
/* interrupt statistics: per-cause counters maintained by the ISR path */
struct isr_statistics {
	u32 hw;			/* hardware error interrupts */
	u32 sw;			/* software/uCode error interrupts */
	u32 err_code;		/* last error code reported */
	u32 sch;
	u32 alive;		/* uCode alive notifications */
	u32 rfkill;		/* RF-kill toggles */
	u32 ctkill;		/* critical-temperature kill events */
	u32 wakeup;
	u32 rx;			/* Rx interrupts */
	u32 rx_handlers[REPLY_MAX];	/* Rx notifications, per reply command id */
	u32 tx;			/* Tx interrupts */
	u32 unhandled;		/* interrupts with no matching handler */
};
817
818/* management statistics */
819enum iwl_mgmt_stats {
820 MANAGEMENT_ASSOC_REQ = 0,
821 MANAGEMENT_ASSOC_RESP,
822 MANAGEMENT_REASSOC_REQ,
823 MANAGEMENT_REASSOC_RESP,
824 MANAGEMENT_PROBE_REQ,
825 MANAGEMENT_PROBE_RESP,
826 MANAGEMENT_BEACON,
827 MANAGEMENT_ATIM,
828 MANAGEMENT_DISASSOC,
829 MANAGEMENT_AUTH,
830 MANAGEMENT_DEAUTH,
831 MANAGEMENT_ACTION,
832 MANAGEMENT_MAX,
833};
834/* control statistics */
835enum iwl_ctrl_stats {
836 CONTROL_BACK_REQ = 0,
837 CONTROL_BACK,
838 CONTROL_PSPOLL,
839 CONTROL_RTS,
840 CONTROL_CTS,
841 CONTROL_ACK,
842 CONTROL_CFEND,
843 CONTROL_CFENDACK,
844 CONTROL_MAX,
845};
846
847struct traffic_stats {
848#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
849 u32 mgmt[MANAGEMENT_MAX];
850 u32 ctrl[CONTROL_MAX];
851 u32 data_cnt;
852 u64 data_bytes;
853#endif
854};
855
856/*
857 * iwl_switch_rxon: "channel switch" structure
858 *
859 * @ switch_in_progress: channel switch in progress
860 * @ channel: new channel
861 */
862struct iwl_switch_rxon {
863 bool switch_in_progress;
864 __le16 channel;
865};
866
867/*
868 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
869 * to perform continuous uCode event logging operation if enabled
870 */
871#define UCODE_TRACE_PERIOD (100)
872
873/*
874 * iwl_event_log: current uCode event log position
875 *
876 * @ucode_trace: enable/disable ucode continuous trace timer
877 * @num_wraps: how many times the event buffer wraps
878 * @next_entry: the entry just before the next one that uCode would fill
879 * @non_wraps_count: counter for no wrap detected when dump ucode events
880 * @wraps_once_count: counter for wrap once detected when dump ucode events
881 * @wraps_more_count: counter for wrap more than once detected
882 * when dump ucode events
883 */
884struct iwl_event_log {
885 bool ucode_trace;
886 u32 num_wraps;
887 u32 next_entry;
888 int non_wraps_count;
889 int wraps_once_count;
890 int wraps_more_count;
891};
892
893/*
894 * host interrupt timeout value
895 * used with setting interrupt coalescing timer
896 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
897 *
898 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
899 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
900 */
901#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
902#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
903#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
904#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
905#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
906#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
907
908/*
909 * This is the threshold value of plcp error rate per 100mSecs. It is
910 * used to set and check for the validity of plcp_delta.
911 */
912#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
913#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
914#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
915#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
916#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
917#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
918
919#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
920#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
921
922/* TX queue watchdog timeouts in mSecs */
923#define IWL_DEF_WD_TIMEOUT (2000)
924#define IWL_LONG_WD_TIMEOUT (10000)
925#define IWL_MAX_WD_TIMEOUT (120000)
926
927enum iwl_reset {
928 IWL_RF_RESET = 0,
929 IWL_FW_RESET,
930 IWL_MAX_FORCE_RESET,
931};
932
/* Book-keeping for forced RF/firmware resets (indexed by enum iwl_reset). */
struct iwl_force_reset {
	int reset_request_count;	/* total reset requests seen */
	int reset_success_count;	/* requests actually carried out */
	int reset_reject_count;		/* requests rejected */
	/* reset_duration: looks like the minimum jiffies interval enforced
	 * between resets — TODO confirm against the reset handler */
	unsigned long reset_duration;
	unsigned long last_force_reset_jiffies;	/* jiffies of last reset */
};
940
941/* extend beacon time format bit shifting */
942/*
943 * for _3945 devices
944 * bits 31:24 - extended
945 * bits 23:0 - interval
946 */
947#define IWL3945_EXT_BEACON_TIME_POS 24
948/*
949 * for _4965 devices
950 * bits 31:22 - extended
951 * bits 21:0 - interval
952 */
953#define IWL4965_EXT_BEACON_TIME_POS 22
954
955enum iwl_rxon_context_id {
956 IWL_RXON_CTX_BSS,
957
958 NUM_IWL_RXON_CTX
959};
960
961struct iwl_rxon_context {
962 struct ieee80211_vif *vif;
963
964 const u8 *ac_to_fifo;
965 const u8 *ac_to_queue;
966 u8 mcast_queue;
967
968 /*
969 * We could use the vif to indicate active, but we
970 * also need it to be active during disabling when
971 * we already removed the vif for type setting.
972 */
973 bool always_active, is_active;
974
975 bool ht_need_multiple_chains;
976
977 enum iwl_rxon_context_id ctxid;
978
979 u32 interface_modes, exclusive_interface_modes;
980 u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
981
982 /*
983 * We declare this const so it can only be
984 * changed via explicit cast within the
985 * routines that actually update the physical
986 * hardware.
987 */
988 const struct iwl_legacy_rxon_cmd active;
989 struct iwl_legacy_rxon_cmd staging;
990
991 struct iwl_rxon_time_cmd timing;
992
993 struct iwl_qos_info qos_data;
994
995 u8 bcast_sta_id, ap_sta_id;
996
997 u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
998 u8 qos_cmd;
999 u8 wep_key_cmd;
1000
1001 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1002 u8 key_mapping_keys;
1003
1004 __le32 station_flags;
1005
1006 struct {
1007 bool non_gf_sta_present;
1008 u8 protection;
1009 bool enabled, is_40mhz;
1010 u8 extension_chan_offset;
1011 } ht;
1012};
1013
1014struct iwl_priv {
1015
1016 /* ieee device used by generic ieee processing code */
1017 struct ieee80211_hw *hw;
1018 struct ieee80211_channel *ieee_channels;
1019 struct ieee80211_rate *ieee_rates;
1020 struct iwl_cfg *cfg;
1021
1022 /* temporary frame storage list */
1023 struct list_head free_frames;
1024 int frames_count;
1025
1026 enum ieee80211_band band;
1027 int alloc_rxb_page;
1028
1029 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
1030 struct iwl_rx_mem_buffer *rxb);
1031
1032 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1033
1034 /* spectrum measurement report caching */
1035 struct iwl_spectrum_notification measure_report;
1036 u8 measurement_status;
1037
1038 /* ucode beacon time */
1039 u32 ucode_beacon_time;
1040 int missed_beacon_threshold;
1041
1042 /* track IBSS manager (last beacon) status */
1043 u32 ibss_manager;
1044
1045 /* storing the jiffies when the plcp error rate is received */
1046 unsigned long plcp_jiffies;
1047
1048 /* force reset */
1049 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1050
1051 /* we allocate array of iwl_channel_info for NIC's valid channels.
1052 * Access via channel # using indirect index array */
1053 struct iwl_channel_info *channel_info; /* channel info array */
1054 u8 channel_count; /* # of channels */
1055
1056 /* thermal calibration */
1057 s32 temperature; /* degrees Kelvin */
1058 s32 last_temperature;
1059
1060 /* init calibration results */
1061 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1062
1063 /* Scan related variables */
1064 unsigned long scan_start;
1065 unsigned long scan_start_tsf;
1066 void *scan_cmd;
1067 enum ieee80211_band scan_band;
1068 struct cfg80211_scan_request *scan_request;
1069 struct ieee80211_vif *scan_vif;
1070 bool is_internal_short_scan;
1071 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1072 u8 mgmt_tx_ant;
1073
1074 /* spinlock */
1075 spinlock_t lock; /* protect general shared data */
1076 spinlock_t hcmd_lock; /* protect hcmd */
1077 spinlock_t reg_lock; /* protect hw register access */
1078 struct mutex mutex;
1079 struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
1080
1081 /* basic pci-network driver stuff */
1082 struct pci_dev *pci_dev;
1083
1084 /* pci hardware address support */
1085 void __iomem *hw_base;
1086 u32 hw_rev;
1087 u32 hw_wa_rev;
1088 u8 rev_id;
1089
1090 /* microcode/device supports multiple contexts */
1091 u8 valid_contexts;
1092
1093 /* command queue number */
1094 u8 cmd_queue;
1095
1096 /* max number of station keys */
1097 u8 sta_key_max_num;
1098
1099 /* EEPROM MAC addresses */
1100 struct mac_address addresses[1];
1101
1102 /* uCode images, save to reload in case of failure */
1103 int fw_index; /* firmware we're trying to load */
1104 u32 ucode_ver; /* version of ucode, copy of
1105 iwl_ucode.ver */
1106 struct fw_desc ucode_code; /* runtime inst */
1107 struct fw_desc ucode_data; /* runtime data original */
1108 struct fw_desc ucode_data_backup; /* runtime data save/restore */
1109 struct fw_desc ucode_init; /* initialization inst */
1110 struct fw_desc ucode_init_data; /* initialization data */
1111 struct fw_desc ucode_boot; /* bootstrap inst */
1112 enum ucode_type ucode_type;
1113 u8 ucode_write_complete; /* the image write is complete */
1114 char firmware_name[25];
1115
1116 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1117
1118 struct iwl_switch_rxon switch_rxon;
1119
1120 /* 1st responses from initialize and runtime uCode images.
1121 * _4965's initialize alive response contains some calibration data. */
1122 struct iwl_init_alive_resp card_alive_init;
1123 struct iwl_alive_resp card_alive;
1124
1125 u16 active_rate;
1126
1127 u8 start_calib;
1128 struct iwl_sensitivity_data sensitivity_data;
1129 struct iwl_chain_noise_data chain_noise_data;
1130 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1131
1132 struct iwl_ht_config current_ht_config;
1133
1134 /* Rate scaling data */
1135 u8 retry_rate;
1136
1137 wait_queue_head_t wait_command_queue;
1138
1139 int activity_timer_active;
1140
1141 /* Rx and Tx DMA processing queues */
1142 struct iwl_rx_queue rxq;
1143 struct iwl_tx_queue *txq;
1144 unsigned long txq_ctx_active_msk;
1145 struct iwl_dma_ptr kw; /* keep warm address */
1146 struct iwl_dma_ptr scd_bc_tbls;
1147
1148 u32 scd_base_addr; /* scheduler sram base address */
1149
1150 unsigned long status;
1151
1152 /* counts mgmt, ctl, and data packets */
1153 struct traffic_stats tx_stats;
1154 struct traffic_stats rx_stats;
1155
1156 /* counts interrupts */
1157 struct isr_statistics isr_stats;
1158
1159 struct iwl_power_mgr power_data;
1160
1161 /* context information */
1162 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1163
1164 /* station table variables */
1165
1166 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1167 spinlock_t sta_lock;
1168 int num_stations;
1169 struct iwl_station_entry stations[IWL_STATION_COUNT];
1170 unsigned long ucode_key_table;
1171
1172 /* queue refcounts */
1173#define IWL_MAX_HW_QUEUES 32
1174 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1175 /* for each AC */
1176 atomic_t queue_stop_count[4];
1177
1178 /* Indication if ieee80211_ops->open has been called */
1179 u8 is_open;
1180
1181 u8 mac80211_registered;
1182
1183 /* eeprom -- this is in the card's little endian byte order */
1184 u8 *eeprom;
1185 struct iwl_eeprom_calib_info *calib_info;
1186
1187 enum nl80211_iftype iw_mode;
1188
1189 /* Last Rx'd beacon timestamp */
1190 u64 timestamp;
1191
1192 union {
1193#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1194 struct {
1195 void *shared_virt;
1196 dma_addr_t shared_phys;
1197
1198 struct delayed_work thermal_periodic;
1199 struct delayed_work rfkill_poll;
1200
1201 struct iwl3945_notif_statistics statistics;
1202#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1203 struct iwl3945_notif_statistics accum_statistics;
1204 struct iwl3945_notif_statistics delta_statistics;
1205 struct iwl3945_notif_statistics max_delta;
1206#endif
1207
1208 u32 sta_supp_rates;
1209 int last_rx_rssi; /* From Rx packet statistics */
1210
1211 /* Rx'd packet timing information */
1212 u32 last_beacon_time;
1213 u64 last_tsf;
1214
1215 /*
1216 * each calibration channel group in the
1217 * EEPROM has a derived clip setting for
1218 * each rate.
1219 */
1220 const struct iwl3945_clip_group clip_groups[5];
1221
1222 } _3945;
1223#endif
1224#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
1225 struct {
1226 /*
1227 * reporting the number of tids has AGG on. 0 means
1228 * no AGGREGATION
1229 */
1230 u8 agg_tids_count;
1231
1232 struct iwl_rx_phy_res last_phy_res;
1233 bool last_phy_res_valid;
1234
1235 struct completion firmware_loading_complete;
1236
1237 /*
1238 * chain noise reset and gain commands are the
1239 * two extra calibration commands follows the standard
1240 * phy calibration commands
1241 */
1242 u8 phy_calib_chain_noise_reset_cmd;
1243 u8 phy_calib_chain_noise_gain_cmd;
1244
1245 struct iwl_notif_statistics statistics;
1246#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1247 struct iwl_notif_statistics accum_statistics;
1248 struct iwl_notif_statistics delta_statistics;
1249 struct iwl_notif_statistics max_delta;
1250#endif
1251
1252 } _4965;
1253#endif
1254 };
1255
1256 struct iwl_hw_params hw_params;
1257
1258 u32 inta_mask;
1259
1260 struct workqueue_struct *workqueue;
1261
1262 struct work_struct restart;
1263 struct work_struct scan_completed;
1264 struct work_struct rx_replenish;
1265 struct work_struct abort_scan;
1266
1267 struct iwl_rxon_context *beacon_ctx;
1268 struct sk_buff *beacon_skb;
1269
1270 struct work_struct start_internal_scan;
1271 struct work_struct tx_flush;
1272
1273 struct tasklet_struct irq_tasklet;
1274
1275 struct delayed_work init_alive_start;
1276 struct delayed_work alive_start;
1277 struct delayed_work scan_check;
1278
1279 /* TX Power */
1280 s8 tx_power_user_lmt;
1281 s8 tx_power_device_lmt;
1282 s8 tx_power_next;
1283
1284
1285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1286 /* debugging info */
1287 u32 debug_level; /* per device debugging will override global
1288 iwlegacy_debug_level if set */
1289#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1290#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1291 /* debugfs */
1292 u16 tx_traffic_idx;
1293 u16 rx_traffic_idx;
1294 u8 *tx_traffic;
1295 u8 *rx_traffic;
1296 struct dentry *debugfs_dir;
1297 u32 dbgfs_sram_offset, dbgfs_sram_len;
1298 bool disable_ht40;
1299#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
1300
1301 struct work_struct txpower_work;
1302 u32 disable_sens_cal;
1303 u32 disable_chain_noise_cal;
1304 u32 disable_tx_power_cal;
1305 struct work_struct run_time_calib_work;
1306 struct timer_list statistics_periodic;
1307 struct timer_list ucode_trace;
1308 struct timer_list watchdog;
1309 bool hw_ready;
1310
1311 struct iwl_event_log event_log;
1312
1313 struct led_classdev led;
1314 unsigned long blink_on, blink_off;
1315 bool led_registered;
1316}; /*iwl_priv */
1317
/* Mark TX queue @txq_id active in the context bitmask (atomic set_bit). */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}
1322
/* Mark TX queue @txq_id inactive in the context bitmask (atomic clear_bit). */
static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}
1327
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * iwl_legacy_get_debug_level: Return active debug level for device
 *
 * Using sysfs it is possible to set per device debug level. This debug
 * level will be used if set, otherwise the global debug level which can be
 * set via module parameter is used.
 */
static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
{
	if (priv->debug_level)
		return priv->debug_level;
	else
		return iwlegacy_debug_level;
}
#else
/* Debugging compiled out: only the global module-parameter level exists. */
static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
{
	return iwlegacy_debug_level;
}
#endif
1349
1350
1351static inline struct ieee80211_hdr *
1352iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1353 int txq_id, int idx)
1354{
1355 if (priv->txq[txq_id].txb[idx].skb)
1356 return (struct ieee80211_hdr *)priv->txq[txq_id].
1357 txb[idx].skb->data;
1358 return NULL;
1359}
1360
1361static inline struct iwl_rxon_context *
1362iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1363{
1364 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1365
1366 return vif_priv->ctx;
1367}
1368
/*
 * Iterate over every RXON context whose bit is set in
 * priv->valid_contexts (i.e. contexts the device/uCode supports).
 */
#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))
1373
1374static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1375 enum iwl_rxon_context_id ctxid)
1376{
1377 return (priv->contexts[ctxid].active.filter_flags &
1378 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1379}
1380
/* With only the BSS context defined, "any associated" is just the BSS one. */
static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
{
	return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
}
1385
1386static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1387{
1388 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1389}
1390
1391static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1392{
1393 if (ch_info == NULL)
1394 return 0;
1395 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1396}
1397
1398static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1399{
1400 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1401}
1402
1403static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
1404{
1405 return ch_info->band == IEEE80211_BAND_5GHZ;
1406}
1407
1408static inline int
1409iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1410{
1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1412}
1413
/*
 * Free an Rx buffer page allocation (by struct page pointer) and drop
 * the driver's outstanding Rx page counter.
 */
static inline void
__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
{
	__free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;
}
1420
/*
 * Free an Rx buffer page allocation (by virtual address) and drop the
 * driver's outstanding Rx page counter.
 */
static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
{
	free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;
}
1426#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 000000000000..080b852b33bd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,45 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
45#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644
index 000000000000..9612aa0f6ec4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -0,0 +1,270 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
100 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
101 TP_ARGS(priv, time, data, ev),
102 TP_STRUCT__entry(
103 PRIV_ENTRY
104
105 __field(u32, time)
106 __field(u32, data)
107 __field(u32, ev)
108 ),
109 TP_fast_assign(
110 PRIV_ASSIGN;
111 __entry->time = time;
112 __entry->data = data;
113 __entry->ev = ev;
114 ),
115 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
116 __entry->priv, __entry->time, __entry->data, __entry->ev)
117);
118
119TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
120 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
121 TP_ARGS(priv, wraps, n_entry, p_entry),
122 TP_STRUCT__entry(
123 PRIV_ENTRY
124
125 __field(u32, wraps)
126 __field(u32, n_entry)
127 __field(u32, p_entry)
128 ),
129 TP_fast_assign(
130 PRIV_ASSIGN;
131 __entry->wraps = wraps;
132 __entry->n_entry = n_entry;
133 __entry->p_entry = p_entry;
134 ),
135 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
136 __entry->priv, __entry->wraps, __entry->n_entry,
137 __entry->p_entry)
138);
139
140#undef TRACE_SYSTEM
141#define TRACE_SYSTEM iwlwifi
142
143TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
144 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
145 TP_ARGS(priv, hcmd, len, flags),
146 TP_STRUCT__entry(
147 PRIV_ENTRY
148 __dynamic_array(u8, hcmd, len)
149 __field(u32, flags)
150 ),
151 TP_fast_assign(
152 PRIV_ASSIGN;
153 memcpy(__get_dynamic_array(hcmd), hcmd, len);
154 __entry->flags = flags;
155 ),
156 TP_printk("[%p] hcmd %#.2x (%ssync)",
157 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
158 __entry->flags & CMD_ASYNC ? "a" : "")
159);
160
161TRACE_EVENT(iwlwifi_legacy_dev_rx,
162 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
163 TP_ARGS(priv, rxbuf, len),
164 TP_STRUCT__entry(
165 PRIV_ENTRY
166 __dynamic_array(u8, rxbuf, len)
167 ),
168 TP_fast_assign(
169 PRIV_ASSIGN;
170 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
171 ),
172 TP_printk("[%p] RX cmd %#.2x",
173 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
174);
175
176TRACE_EVENT(iwlwifi_legacy_dev_tx,
177 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
178 void *buf0, size_t buf0_len,
179 void *buf1, size_t buf1_len),
180 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
181 TP_STRUCT__entry(
182 PRIV_ENTRY
183
184 __field(size_t, framelen)
185 __dynamic_array(u8, tfd, tfdlen)
186
187 /*
188 * Do not insert between or below these items,
189 * we want to keep the frame together (except
190 * for the possible padding).
191 */
192 __dynamic_array(u8, buf0, buf0_len)
193 __dynamic_array(u8, buf1, buf1_len)
194 ),
195 TP_fast_assign(
196 PRIV_ASSIGN;
197 __entry->framelen = buf0_len + buf1_len;
198 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
199 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
200 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
201 ),
202 TP_printk("[%p] TX %.2x (%zu bytes)",
203 __entry->priv,
204 ((u8 *)__get_dynamic_array(buf0))[0],
205 __entry->framelen)
206);
207
208TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
209 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
210 u32 data1, u32 data2, u32 line, u32 blink1,
211 u32 blink2, u32 ilink1, u32 ilink2),
212 TP_ARGS(priv, desc, time, data1, data2, line,
213 blink1, blink2, ilink1, ilink2),
214 TP_STRUCT__entry(
215 PRIV_ENTRY
216 __field(u32, desc)
217 __field(u32, time)
218 __field(u32, data1)
219 __field(u32, data2)
220 __field(u32, line)
221 __field(u32, blink1)
222 __field(u32, blink2)
223 __field(u32, ilink1)
224 __field(u32, ilink2)
225 ),
226 TP_fast_assign(
227 PRIV_ASSIGN;
228 __entry->desc = desc;
229 __entry->time = time;
230 __entry->data1 = data1;
231 __entry->data2 = data2;
232 __entry->line = line;
233 __entry->blink1 = blink1;
234 __entry->blink2 = blink2;
235 __entry->ilink1 = ilink1;
236 __entry->ilink2 = ilink2;
237 ),
238 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
239 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
240 __entry->priv, __entry->desc, __entry->time, __entry->data1,
241 __entry->data2, __entry->line, __entry->blink1,
242 __entry->blink2, __entry->ilink1, __entry->ilink2)
243);
244
245TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
246 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
247 TP_ARGS(priv, time, data, ev),
248 TP_STRUCT__entry(
249 PRIV_ENTRY
250
251 __field(u32, time)
252 __field(u32, data)
253 __field(u32, ev)
254 ),
255 TP_fast_assign(
256 PRIV_ASSIGN;
257 __entry->time = time;
258 __entry->data = data;
259 __entry->ev = ev;
260 ),
261 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
262 __entry->priv, __entry->time, __entry->data, __entry->ev)
263);
#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
265
266#undef TRACE_INCLUDE_PATH
267#define TRACE_INCLUDE_PATH .
268#undef TRACE_INCLUDE_FILE
269#define TRACE_INCLUDE_FILE iwl-devtrace
270#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644
index 000000000000..04c5648027df
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -0,0 +1,561 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwlegacy_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwlegacy_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156 IWL_ERR(priv, "bad EEPROM signature,"
157 "EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
179
/**
 * iwl_legacy_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into priv->eeprom
 *
 * Returns 0 on success or a negative errno; on any failure the eeprom
 * buffer is freed again.  In every case except allocation failure the
 * chip is put back to low power via iwl_legacy_apm_stop() on return.
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int iwl_legacy_eeprom_init(struct iwl_priv *priv)
{
	__le16 *e;
	u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = priv->cfg->base_params->eeprom_size;
	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
	priv->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!priv->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	e = (__le16 *)priv->eeprom;

	/* bring the device out of low power so the EEPROM is readable */
	priv->cfg->ops->lib->apm_ops.init(priv);

	ret = iwl_legacy_eeprom_verify_signature(priv);
	if (ret < 0) {
		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
	if (ret < 0) {
		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		/* request the word at this byte address (<<1: word address) */
		_iwl_legacy_write32(priv, CSR_EEPROM_REG,
			    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		/* wait for hardware to flag the read data as valid */
		ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
					  CSR_EEPROM_REG_READ_VALID_MSK,
					  CSR_EEPROM_REG_READ_VALID_MSK,
					  IWL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
							addr);
			goto done;
		}
		/* the 16-bit datum lives in the upper half of the register */
		r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
		       "EEPROM",
		       iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));

	ret = 0;
done:
	priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);

err:
	if (ret)
		iwl_legacy_eeprom_free(priv);
	/* Reset chip to save power until we load uCode during "up". */
	iwl_legacy_apm_stop(priv);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwlegacy_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwlegacy_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwlegacy_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwlegacy_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwlegacy_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwlegacy_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwlegacy_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 return;
320 }
321}
322
/* Expands to the flag name plus a space when set, "" otherwise; relies on
 * a local variable named eeprom_ch being in scope at the use site. */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/**
 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
 *
 * Does not set up a command, or touch hardware.
 *
 * Returns 0 on success, -1 when (band, channel) has no valid entry in the
 * driver's channel map.  When EEPROM_CHANNEL_VALID is set, enables the
 * requested ht40 extension direction by clearing its "not allowed" bit.
 */
static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
			      enum ieee80211_band band, u16 channel,
			      const struct iwl_eeprom_channel *eeprom_ch,
			      u8 clear_ht40_extension_channel)
{
	struct iwl_channel_info *ch_info;

	/* cast away const: this is the driver's own writable channel table */
	ch_info = (struct iwl_channel_info *)
			iwl_legacy_get_channel_info(priv, band, channel);

	if (!iwl_legacy_is_channel_valid(ch_info))
		return -1;

	IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
			" Ad-Hoc %ssupported\n",
			ch_info->channel,
			iwl_legacy_is_channel_a_band(ch_info) ?
			"5.2" : "2.4",
			CHECK_AND_PRINT(IBSS),
			CHECK_AND_PRINT(ACTIVE),
			CHECK_AND_PRINT(RADAR),
			CHECK_AND_PRINT(WIDE),
			CHECK_AND_PRINT(DFS),
			eeprom_ch->flags,
			eeprom_ch->max_power_avg,
			((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
			 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
			"" : "not ");

	/* keep a private copy of the HT40 regulatory data for this channel */
	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
					~clear_ht40_extension_channel;

	return 0;
}
368
/* As CHECK_AND_PRINT, but indexes the local eeprom_ch_info[ch] array. */
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/**
 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
 *
 * Builds priv->channel_info from the cached EEPROM image: one entry per
 * channel across EEPROM bands 1-5, then (when the device supports HT40)
 * overlays HT40 extension info from bands 6-7 onto the existing entries.
 * Idempotent: returns immediately if the map was already built.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int iwl_legacy_init_channel_map(struct iwl_priv *priv)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_index = NULL;
	const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct iwl_channel_info *ch_info;

	if (priv->channel_count) {
		IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
		return 0;
	}

	IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");

	/* bands 6/7 (HT40) reuse the channels of bands 1-5, so only the
	 * first five bands contribute distinct entries */
	priv->channel_count =
	    ARRAY_SIZE(iwlegacy_eeprom_band_1) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_2) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_3) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_4) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_5);

	IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
			priv->channel_count);

	priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
				     priv->channel_count, GFP_KERNEL);
	if (!priv->channel_info) {
		IWL_ERR(priv, "Could not allocate channel_info\n");
		priv->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = priv->channel_info;

	/* Loop through the 5 EEPROM bands adding them in order to the
	 * channel map we maintain (that contains additional information than
	 * what just in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_index[ch];
			ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
			    IEEE80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 *   and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then enable
			 * one by one */
			ch_info->ht40_extension_channel =
					IEEE80211_CHAN_NO_HT40;

			if (!(iwl_legacy_is_channel_valid(ch_info))) {
				IWL_DEBUG_EEPROM(priv,
					       "Ch. %d Flags %x [%sGHz] - "
					       "No traffic\n",
					       ch_info->channel,
					       ch_info->flags,
					       iwl_legacy_is_channel_a_band(ch_info) ?
					       "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
				       "%s%s%s%s%s%s(0x%02x %ddBm):"
				       " Ad-Hoc %ssupported\n",
				       ch_info->channel,
				       iwl_legacy_is_channel_a_band(ch_info) ?
				       "5.2" : "2.4",
				       CHECK_AND_PRINT_I(VALID),
				       CHECK_AND_PRINT_I(IBSS),
				       CHECK_AND_PRINT_I(ACTIVE),
				       CHECK_AND_PRINT_I(RADAR),
				       CHECK_AND_PRINT_I(WIDE),
				       CHECK_AND_PRINT_I(DFS),
				       eeprom_ch_info[ch].flags,
				       eeprom_ch_info[ch].max_power_avg,
				       ((eeprom_ch_info[ch].
					 flags & EEPROM_CHANNEL_IBSS)
					&& !(eeprom_ch_info[ch].
					     flags & EEPROM_CHANNEL_RADAR))
				       ? "" : "not ");

			/* Set the tx_power_user_lmt to the highest power
			 * supported by any channel */
			if (eeprom_ch_info[ch].max_power_avg >
						priv->tx_power_user_lmt)
				priv->tx_power_user_lmt =
				    eeprom_ch_info[ch].max_power_avg;

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
	    EEPROM_REGULATORY_BAND_NO_HT40 &&
	    priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
	    EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
			(band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
						eeprom_ch_index[ch],
						&eeprom_ch_info[ch],
						IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half
			 * (upper 20 MHz half is 4 channels above the lower) */
			iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
						eeprom_ch_index[ch] + 4,
						&eeprom_ch_info[ch],
						IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_channel_map);
522
523/*
524 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
525 */
526void iwl_legacy_free_channel_map(struct iwl_priv *priv)
527{
528 kfree(priv->channel_info);
529 priv->channel_count = 0;
530}
531EXPORT_SYMBOL(iwl_legacy_free_channel_map);
532
533/**
534 * iwl_legacy_get_channel_info - Find driver's private channel info
535 *
536 * Based on band and channel number.
537 */
538const struct
539iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
540 enum ieee80211_band band, u16 channel)
541{
542 int i;
543
544 switch (band) {
545 case IEEE80211_BAND_5GHZ:
546 for (i = 14; i < priv->channel_count; i++) {
547 if (priv->channel_info[i].channel == channel)
548 return &priv->channel_info[i];
549 }
550 break;
551 case IEEE80211_BAND_2GHZ:
552 if (channel >= 1 && channel <= 14)
553 return &priv->channel_info[channel - 1];
554 break;
555 default:
556 BUG();
557 }
558
559 return NULL;
560}
561EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644
index 000000000000..c59c81002022
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
@@ -0,0 +1,344 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwlegacy_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
163struct iwl_eeprom_calib_measure {
164 u8 temperature; /* Device temperature (Celsius) */
165 u8 gain_idx; /* Index into gain table */
166 u8 actual_pow; /* Measured RF output power, half-dBm */
167 s8 pa_det; /* Power amp detector level (not used) */
168} __packed;
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
179struct iwl_eeprom_calib_ch_info {
180 u8 ch_num;
181 struct iwl_eeprom_calib_measure
182 measurements[EEPROM_TX_POWER_TX_CHAINS]
183 [EEPROM_TX_POWER_MEASUREMENTS];
184} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
196struct iwl_eeprom_calib_subband_info {
197 u8 ch_from; /* channel number of lowest channel in subband */
198 u8 ch_to; /* channel number of highest channel in subband */
199 struct iwl_eeprom_calib_ch_info ch1;
200 struct iwl_eeprom_calib_ch_info ch2;
201} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
224struct iwl_eeprom_calib_info {
225 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
226 u8 saturation_power52; /* half-dBm */
227 __le16 voltage; /* signed */
228 struct iwl_eeprom_calib_subband_info
229 band_info[EEPROM_TX_POWER_BANDS];
230} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
struct iwl_eeprom_ops {
	/* EEPROM offsets of the 7 regulatory band tables (index 0 = band 1);
	 * entries [5]/[6] set to EEPROM_REGULATORY_BAND_NO_HT40 mean the
	 * device has no HT40 channel data */
	const u32 regulatory_bands[7];
	/* acquire/release the HW semaphore so the driver (not uCode) may
	 * read the EEPROM; used around the read loop in iwl_legacy_eeprom_init */
	int (*acquire_semaphore) (struct iwl_priv *priv);
	void (*release_semaphore) (struct iwl_priv *priv);
};
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644
index 000000000000..4e20c7e5c883
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -0,0 +1,513 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
95
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
114
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
192 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
199 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
211
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
364
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This mean that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
402
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
/**
 * struct iwl_rb_status - receive buffer status
 * 	host memory mapped FH registers
 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
 * @finished_rb_num [0:11] - Indicates the index of the current RB
 * 	in which the last frame was written to
 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
 * 	which was transferred
 */
struct iwl_rb_status {
	__le16 closed_rb_num;
	__le16 closed_fr_num;
	__le16 finished_rb_num;
	__le16 finished_fr_nam;	/* NOTE(review): "nam" typo is in the original
				 * field name; kept, since renaming would break
				 * every user of this header */
	__le32 __unused; /* 3945 only */
} __packed;
448
449
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
/*
 * Return bits [35:32] of a DMA address — the "high" nibble written to
 * registers that take a 36-bit bus address.
 *
 * The double 16-bit shift (instead of a single ">> 32") avoids
 * undefined behavior on platforms where dma_addr_t is only 32 bits
 * wide; in that case the sizeof() test makes the result simply 0.
 */
static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
{
	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
}
/**
 * struct iwl_tfd_tb - transmit buffer (TB) descriptor within a TFD
 *
 * Contains the DMA address and length of one transmit buffer.
 *
 * @lo: low [31:0] portion of the DMA address of the TX buffer;
 * 	every even-numbered TB is unaligned on a 16-bit boundary
 * 	(each entry is 6 bytes and __packed, so consecutive array
 * 	entries alternate 32-bit alignment)
 * @hi_n_len: bits 0-3: [35:32] portion of the DMA address
 * 	bits 4-15: length of the TX buffer
 */
struct iwl_tfd_tb {
	__le32 lo;
	__le16 hi_n_len;
} __packed;
474
/**
 * struct iwl_tfd - Transmit Frame Descriptor (TFD)
 *
 * @ __reserved1[3] reserved
 * @ num_tbs 0-4 number of active tbs
 * 5 reserved
 * 6-7 padding (not used)
 * @ tbs[20] transmit frame buffer descriptors
 * @ __pad padding
 *
 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
 * Both driver and device share these circular buffers, each of which must be
 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
 *
 * Driver must indicate the physical address of the base of each
 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
 *
 * Each TFD contains pointer/size information for up to 20 data buffers
 * in host DRAM. These buffers collectively contain the (one) frame described
 * by the TFD. Each buffer must be a single contiguous block of memory within
 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
 * of (4K - 4). The device concatenates all of a TFD's buffers into a single
 * Tx frame, up to 8 KBytes in size.
 *
 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
 */
struct iwl_tfd {
	u8 __reserved1[3];
	u8 num_tbs;
	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
	__le32 __pad;
} __packed;
509
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644
index 000000000000..9d721cbda5bb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -0,0 +1,271 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
/*
 * iwl_legacy_get_cmd_string - map a host-command opcode to its name
 *
 * For debug/error messages only.  Each IWL_CMD(x) entry is a macro
 * defined elsewhere — presumably expanding to "case x: return #x;";
 * any opcode not listed falls through to "UNKNOWN".
 */
const char *iwl_legacy_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_3945_RX);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_RATE_SCALE);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
/*
 * Default completion callback for async host commands sent without an
 * explicit callback: logs an error if the firmware flagged the command
 * as failed, otherwise (debug builds only) logs the reply.
 */
static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
				struct iwl_device_cmd *cmd,
				struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		/* these two replies go to the (chattier) HC_DUMP level */
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
/*
 * iwl_legacy_send_cmd_sync - send a host command and wait for completion
 *
 * Enqueues @cmd on the command queue and sleeps (interruptibly, up to
 * HOST_COMPLETE_TIMEOUT) until STATUS_HCMD_ACTIVE is cleared by the
 * completion path.  Must be allowed to sleep: takes sync_cmd_mutex.
 *
 * Returns 0 on success, -ETIMEDOUT on timeout, -ECANCELED on RF-kill,
 * -EIO on firmware error or a missing reply page, or the negative
 * error from iwl_legacy_enqueue_hcmd().  On failure any reply page is
 * freed and, for timeouts, the queued command's CMD_WANT_SKB flag is
 * cancelled so a late completion cannot write through a stale pointer.
 */
int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	BUG_ON(cmd->flags & CMD_ASYNC);

	 /* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
			iwl_legacy_get_cmd_string(cmd->id));
	mutex_lock(&priv->sync_cmd_mutex);

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
			iwl_legacy_get_cmd_string(cmd->id));

	cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			  iwl_legacy_get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* Completion clears STATUS_HCMD_ACTIVE and wakes this queue;
	 * a 0 return from the wait means the timeout elapsed. */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERR(priv,
				"Error sending %s: time out after %dms.\n",
				iwl_legacy_get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			IWL_DEBUG_INFO(priv,
				"Clearing HCMD_ACTIVE for command %s\n",
				       iwl_legacy_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
			       iwl_legacy_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			       iwl_legacy_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			  iwl_legacy_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_legacy_free_pages(priv, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	mutex_unlock(&priv->sync_cmd_mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644
index 000000000000..02132e755831
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -0,0 +1,181 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
/* Trivial accessor for the mac80211 hardware configuration. */
static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
			struct ieee80211_hw *hw)
{
	return &hw->conf;
}
46
/**
 * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
{
	/* n_bd is a power of 2, so masking implements the wrap-around */
	return (index + 1) & (n_bd - 1);
}
56
/**
 * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
{
	/* n_bd is a power of 2, so masking implements the wrap-around */
	return (index - 1) & (n_bd - 1);
}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
68static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
69 struct fw_desc *desc)
70{
71 if (desc->v_addr)
72 dma_free_coherent(&pci_dev->dev, desc->len,
73 desc->v_addr, desc->p_addr);
74 desc->v_addr = NULL;
75 desc->len = 0;
76}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/*
92 * we have 8 bits used like this:
93 *
94 * 7 6 5 4 3 2 1 0
95 * | | | | | | | |
96 * | | | | | | +-+-------- AC queue (0-3)
97 * | | | | | |
98 * | +-+-+-+-+------------ HW queue ID
99 * |
100 * +---------------------- unused
101 */
102static inline void
103iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
104{
105 BUG_ON(ac > 3); /* only have 2 bits */
106 BUG_ON(hwq > 31); /* only use 5 bits */
107
108 txq->swq_id = (hwq << 2) | ac;
109}
110
111static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
112 struct iwl_tx_queue *txq)
113{
114 u8 queue = txq->swq_id;
115 u8 ac = queue & 3;
116 u8 hwq = (queue >> 2) & 0x1f;
117
118 if (test_and_clear_bit(hwq, priv->queue_stopped))
119 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
120 ieee80211_wake_queue(priv->hw, ac);
121}
122
123static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
124 struct iwl_tx_queue *txq)
125{
126 u8 queue = txq->swq_id;
127 u8 ac = queue & 3;
128 u8 hwq = (queue >> 2) & 0x1f;
129
130 if (!test_and_set_bit(hwq, priv->queue_stopped))
131 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
132 ieee80211_stop_queue(priv->hw, ac);
133}
134
135#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
136#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
137
/*
 * Mask all device interrupts and acknowledge anything already pending.
 * The register-write order (mask first, then ack) is deliberate; do not
 * reorder.
 */
static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
{
	clear_bit(STATUS_INT_ENABLED, &priv->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(priv, CSR_INT, 0xffffffff);
	iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
}
151
/* Re-enable the interrupt sources recorded in priv->inta_mask and mark
 * interrupts as enabled in the driver status bits. */
static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
{
	IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &priv->status);
	iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
}
158
159/**
160 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
161 * @priv -- pointer to iwl_priv data structure
162 * @tsf_bits -- number of bits need to shift for masking)
163 */
164static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
165 u16 tsf_bits)
166{
167 return (1 << tsf_bits) - 1;
168}
169
/**
 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
 * @priv -- pointer to iwl_priv data structure
 * @tsf_bits -- number of bits need to shift for masking)
 *
 * Returns a mask covering bits [tsf_bits, 31], i.e. the complement of
 * iwl_legacy_beacon_time_mask_low().
 * NOTE(review): shift assumes 0 < tsf_bits < 32 — confirm caller range.
 */
static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
					    u16 tsf_bits)
{
	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
}
180
181#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644
index 000000000000..5cc5d342914f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-io.h
@@ -0,0 +1,545 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
43 * A single _ prefix before a an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
/* Raw 8-bit MMIO write (no state check); trace-logged for debugging. */
static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
{
	trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
	iowrite8(val, priv->hw_base + ofs);
}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug wrapper: logs the caller's file/line before performing the write. */
static inline void
__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
				 u32 ofs, u8 val)
{
	IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
	_iwl_legacy_write8(priv, ofs, val);
}
#define iwl_write8(priv, ofs, val) \
	__iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
#endif
85
86
/* Raw 32-bit MMIO write (no state check); trace-logged for debugging. */
static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
{
	trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
	iowrite32(val, priv->hw_base + ofs);
}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug wrapper: logs the caller's file/line before performing the write. */
static inline void
__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
				 u32 ofs, u32 val)
{
	IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
	_iwl_legacy_write32(priv, ofs, val);
}
#define iwl_write32(priv, ofs, val) \
	__iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
#endif
106
107static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
108{
109 u32 val = ioread32(priv->hw_base + ofs);
110 trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
111 return val;
112}
113
114#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
115static inline u32
116__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
117{
118 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
119 return _iwl_legacy_read32(priv, ofs);
120}
121#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
122#else
123#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
124#endif
125
#define IWL_POLL_INTERVAL 10	/* microseconds */
/*
 * Busy-poll @addr until (value & mask) == (bits & mask) or @timeout
 * microseconds elapse. Returns the elapsed time on success (may be 0),
 * -ETIMEDOUT on timeout. Polls in IWL_POLL_INTERVAL steps.
 */
static inline int
_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
				u32 bits, u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
			return t;
		udelay(IWL_POLL_INTERVAL);
		t += IWL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug wrapper: logs the poll result plus the caller's file/line. */
static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
				 struct iwl_priv *priv, u32 addr,
				 u32 bits, u32 mask, int timeout)
{
	int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
	IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
		addr, bits, mask,
		unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
	return ret;
}
#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
	__iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
	bits, mask, timeout)
#else
#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
#endif
159
/* Unlocked read-modify-write: OR @mask into register @reg. */
static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void __iwl_legacy_set_bit(const char *f, u32 l,
				 struct iwl_priv *priv, u32 reg, u32 mask)
{
	u32 val = _iwl_legacy_read32(priv, reg) | mask;
	IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
							mask, val);
	_iwl_legacy_write32(priv, reg, val);
}
/* Public variant: the read-modify-write runs atomically under reg_lock. */
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	__iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
/* Public variant: the read-modify-write runs atomically under reg_lock. */
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_iwl_legacy_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
191
/* Unlocked read-modify-write: clear @mask bits in register @reg. */
static inline void
_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void
__iwl_legacy_clear_bit(const char *f, u32 l,
			struct iwl_priv *priv, u32 reg, u32 mask)
{
	u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
	IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
	_iwl_legacy_write32(priv, reg, val);
}
/* Public variant: the read-modify-write runs atomically under reg_lock. */
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	__iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
/* Public variant: the read-modify-write runs atomically under reg_lock. */
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_iwl_legacy_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
224
/*
 * Wake the NIC for direct register/memory access.
 * Returns 0 on success; on failure forces an NMI to the device and
 * returns -EIO. Pair every successful call with
 * _iwl_legacy_release_nic_access().
 */
static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 */
	ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (ret < 0) {
		val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
		IWL_ERR(priv,
			"MAC is in deep sleep!.  CSR_GP_CNTRL = 0x%08X\n", val);
		_iwl_legacy_write32(priv, CSR_RESET,
				CSR_RESET_REG_FLAG_FORCE_NMI);
		return -EIO;
	}

	return 0;
}
266
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug wrapper: logs the caller's file/line before grabbing NIC access. */
static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
					       struct iwl_priv *priv)
{
	IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
	return _iwl_legacy_grab_nic_access(priv);
}
#define iwl_grab_nic_access(priv) \
	__iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_grab_nic_access(priv) \
	_iwl_legacy_grab_nic_access(priv)
#endif
280
/* Drop the MAC access request set by _iwl_legacy_grab_nic_access(),
 * allowing the NIC to go back to sleep. */
static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
{
	_iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug wrapper: logs the caller's file/line before releasing NIC access. */
static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
					    struct iwl_priv *priv)
{

	IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
	_iwl_legacy_release_nic_access(priv);
}
#define iwl_release_nic_access(priv) \
	__iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_release_nic_access(priv) \
	_iwl_legacy_release_nic_access(priv)
#endif
300
/* "Direct" read: same as a plain read, but callers must hold NIC access. */
static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	return _iwl_legacy_read32(priv, reg);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
					struct iwl_priv *priv, u32 reg)
{
	u32 value = _iwl_legacy_read_direct32(priv, reg);
	IWL_DEBUG_IO(priv,
			"read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
		     f, l);
	return value;
}
/* Locked variant: grabs NIC access around the read.
 * NOTE(review): the grab result is ignored here, unlike the write path
 * which checks it — confirm this asymmetry is intentional. */
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;
}

#else
/* Locked variant: grabs NIC access around the read (result ignored). */
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	value = _iwl_legacy_read_direct32(priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;

}
#endif
343
/* "Direct" write: same as a plain write, but callers must hold NIC access. */
static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
					 u32 reg, u32 value)
{
	_iwl_legacy_write32(priv, reg, value);
}
/* Locked variant: performs the write only if NIC access is granted. */
static inline void
iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_direct32(priv, reg, value);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
/*
 * Busy-poll a direct-access register until all @mask bits are set or
 * @timeout microseconds elapse. Each probe takes reg_lock and NIC access
 * via iwl_legacy_read_direct32(). Returns elapsed time or -ETIMEDOUT.
 */
static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
				       u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
			return t;
		udelay(IWL_POLL_INTERVAL);
		t += IWL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug wrapper: logs the poll result plus the caller's file/line. */
static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
					    struct iwl_priv *priv,
					    u32 addr, u32 mask, int timeout)
{
	int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);

	if (unlikely(ret == -ETIMEDOUT))
		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
			     "timedout - %s %d\n", addr, mask, f, l);
	else
		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
			     "- %s %d\n", addr, mask, ret, f, l);
	return ret;
}
#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
#else
#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
#endif
408
/* Read a periphery (PRPH) register via the indirect HBUS address/data
 * pair. Caller must hold NIC access. The (3 << 24) bits select the
 * auto-increment access mode — presumably; confirm against CSR docs. */
static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	rmb();
	return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
}
/* Locked variant: address and data accesses stay paired under reg_lock. */
static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	val = _iwl_legacy_read_prph(priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return val;
}
427
/* Write a periphery (PRPH) register via the indirect HBUS address/data
 * pair. Caller must hold NIC access. */
static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
					     u32 addr, u32 val)
{
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
			      ((addr & 0x0000FFFF) | (3 << 24)));
	wmb();
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
}

/* Locked variant: the write happens only if NIC access is granted. */
static inline void
iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_prph(priv, addr, val);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
449
/* Unlocked PRPH read-modify-write: OR @mask into the register. */
#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))

/* Locked variant: the whole read-modify-write runs under reg_lock. */
static inline void
iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	_iwl_legacy_set_bits_prph(priv, reg, mask);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
464
/* Unlocked PRPH read-modify-write: keep only @mask bits, then OR in @bits. */
#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
_iwl_legacy_write_prph(priv, reg, \
		 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))

/* Locked variant: the whole read-modify-write runs under reg_lock. */
static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
				u32 bits, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	_iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
480
/* Locked PRPH read-modify-write: clear @mask bits in the register. */
static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
						 *priv, u32 reg, u32 mask)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	val = _iwl_legacy_read_prph(priv, reg);
	_iwl_legacy_write_prph(priv, reg, (val & ~mask));
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
494
/* Read one word of device (target) SRAM via the indirect HBUS
 * address/data pair, under reg_lock with NIC access held. */
static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);

	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
	rmb();
	value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;
}
511
/* Write one word of device (target) SRAM via the indirect HBUS
 * address/data pair; skipped entirely if NIC access cannot be grabbed. */
static inline void
iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
		wmb();
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
526
/* Burst-write @len bytes of 32-bit words to device SRAM starting at
 * @addr; the write address auto-increments after the first WADDR write.
 * Skipped entirely if NIC access cannot be grabbed. */
static inline void
iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
					  u32 len, u32 *values)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
		wmb();
		for (; 0 < len; len -= sizeof(u32), values++)
			_iwl_legacy_write_direct32(priv,
					HBUS_TARG_MEM_WDAT, *values);

		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
545#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644
index 000000000000..15eb8b707157
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -0,0 +1,188 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44
45/* default: IWL_LED_BLINK(0) using blinking index table */
46static int led_mode;
47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking");
50
51static const struct ieee80211_tpt_blink iwl_blink[] = {
52 { .throughput = 0 * 1024 - 1, .blink_time = 334 },
53 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
54 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
55 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
56 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
57 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
58 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
59 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
60 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
61 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
62};
63
64/*
65 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
66 * Led blink rate analysis showed an average deviation of 0% on 3945,
67 * 5% on 4965 HW.
68 * Need to compensate on the led on/off time per HW according to the deviation
69 * to achieve the desired led frequency
70 * The calculation is: (100-averageDeviation)/100 * blinkTime
71 * For code efficiency the calculation will be:
72 * compensation = (100 - averageDeviation) * 64 / 100
73 * NewBlinkTime = (compensation * BlinkTime) / 64
74 */
75static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
76 u8 time, u16 compensation)
77{
78 if (!compensation) {
79 IWL_ERR(priv, "undefined blink compensation: "
80 "use pre-defined blinking time\n");
81 return time;
82 }
83
84 return (u8)((time * compensation) >> 6);
85}
86
/* Set led pattern command */
/*
 * Send an LED on/off pattern to the uCode. Times are compensated per-HW
 * (see iwl_legacy_blink_compensation). Returns -EBUSY before the device
 * is ready, 0 without sending if the pattern is unchanged, otherwise the
 * result of the device-specific led->cmd op. Caches the pattern on
 * success so repeated identical requests are cheap.
 */
static int iwl_legacy_led_cmd(struct iwl_priv *priv,
		       unsigned long on,
		       unsigned long off)
{
	struct iwl_led_cmd led_cmd = {
		.id = IWL_LED_LINK,
		.interval = IWL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(STATUS_READY, &priv->status))
		return -EBUSY;

	if (priv->blink_on == on && priv->blink_off == off)
		return 0;

	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
			priv->cfg->base_params->led_compensation);
	led_cmd.on = iwl_legacy_blink_compensation(priv, on,
				priv->cfg->base_params->led_compensation);
	led_cmd.off = iwl_legacy_blink_compensation(priv, off,
				priv->cfg->base_params->led_compensation);

	ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
	if (!ret) {
		priv->blink_on = on;
		priv->blink_off = off;
	}
	return ret;
}
118
119static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
120 enum led_brightness brightness)
121{
122 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
123 unsigned long on = 0;
124
125 if (brightness > 0)
126 on = IWL_LED_SOLID;
127
128 iwl_legacy_led_cmd(priv, on, 0);
129}
130
/* led_classdev blink_set hook: forwards the requested on/off times to
 * the device. The times are not written back, so the LED core's software
 * fallback is not adjusted to the compensated values. */
static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
			     unsigned long *delay_on,
			     unsigned long *delay_off)
{
	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);

	return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
}
139
140void iwl_legacy_leds_init(struct iwl_priv *priv)
141{
142 int mode = led_mode;
143 int ret;
144
145 if (mode == IWL_LED_DEFAULT)
146 mode = priv->cfg->led_mode;
147
148 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
149 wiphy_name(priv->hw->wiphy));
150 priv->led.brightness_set = iwl_legacy_led_brightness_set;
151 priv->led.blink_set = iwl_legacy_led_blink_set;
152 priv->led.max_brightness = 1;
153
154 switch (mode) {
155 case IWL_LED_DEFAULT:
156 WARN_ON(1);
157 break;
158 case IWL_LED_BLINK:
159 priv->led.default_trigger =
160 ieee80211_create_tpt_led_trigger(priv->hw,
161 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
162 iwl_blink, ARRAY_SIZE(iwl_blink));
163 break;
164 case IWL_LED_RF_STATE:
165 priv->led.default_trigger =
166 ieee80211_get_radio_led_name(priv->hw);
167 break;
168 }
169
170 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
171 if (ret) {
172 kfree(priv->led.name);
173 return;
174 }
175
176 priv->led_registered = true;
177}
178EXPORT_SYMBOL(iwl_legacy_leds_init);
179
180void iwl_legacy_leds_exit(struct iwl_priv *priv)
181{
182 if (!priv->led_registered)
183 return;
184
185 led_classdev_unregister(&priv->led);
186 kfree(priv->led.name);
187}
188EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644
index 000000000000..f0791f70f79d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644
index 000000000000..38647e481eb0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
@@ -0,0 +1,456 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
35 u8 prev_ieee; /* previous rate in IEEE speeds */
36 u8 next_ieee; /* next rate in IEEE speeds */
37 u8 prev_rs; /* previous rate used in rs algo */
38 u8 next_rs; /* next rate used in rs algo */
39 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
40 u8 next_rs_tgg; /* next rate used in TGG rs algo */
41};
42
43struct iwl3945_rate_info {
44 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
45 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
46 u8 prev_ieee; /* previous rate in IEEE speeds */
47 u8 next_ieee; /* next rate in IEEE speeds */
48 u8 prev_rs; /* previous rate used in rs algo */
49 u8 next_rs; /* next rate used in rs algo */
50 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
51 u8 next_rs_tgg; /* next rate used in TGG rs algo */
52 u8 table_rs_index; /* index in rate scale table cmd */
53 u8 prev_table_rs; /* prev in rate table cmd */
54};
55
56
57/*
58 * These serve as indexes into
59 * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
60 */
61enum {
62 IWL_RATE_1M_INDEX = 0,
63 IWL_RATE_2M_INDEX,
64 IWL_RATE_5M_INDEX,
65 IWL_RATE_11M_INDEX,
66 IWL_RATE_6M_INDEX,
67 IWL_RATE_9M_INDEX,
68 IWL_RATE_12M_INDEX,
69 IWL_RATE_18M_INDEX,
70 IWL_RATE_24M_INDEX,
71 IWL_RATE_36M_INDEX,
72 IWL_RATE_48M_INDEX,
73 IWL_RATE_54M_INDEX,
74 IWL_RATE_60M_INDEX,
75 IWL_RATE_COUNT,
76 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
77 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
78 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
79 IWL_RATE_INVALID = IWL_RATE_COUNT,
80};
81
82enum {
83 IWL_RATE_6M_INDEX_TABLE = 0,
84 IWL_RATE_9M_INDEX_TABLE,
85 IWL_RATE_12M_INDEX_TABLE,
86 IWL_RATE_18M_INDEX_TABLE,
87 IWL_RATE_24M_INDEX_TABLE,
88 IWL_RATE_36M_INDEX_TABLE,
89 IWL_RATE_48M_INDEX_TABLE,
90 IWL_RATE_54M_INDEX_TABLE,
91 IWL_RATE_1M_INDEX_TABLE,
92 IWL_RATE_2M_INDEX_TABLE,
93 IWL_RATE_5M_INDEX_TABLE,
94 IWL_RATE_11M_INDEX_TABLE,
95 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
96};
97
98enum {
99 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
100 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
101 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
102 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
103 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
104};
105
106/* #define vs. enum to keep from defaulting to 'large integer' */
107#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
108#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
109#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
110#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
111#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
112#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
113#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
114#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
115#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
116#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
117#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
118#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
119#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
120
121/* uCode API values for legacy bit rates, both OFDM and CCK */
122enum {
123 IWL_RATE_6M_PLCP = 13,
124 IWL_RATE_9M_PLCP = 15,
125 IWL_RATE_12M_PLCP = 5,
126 IWL_RATE_18M_PLCP = 7,
127 IWL_RATE_24M_PLCP = 9,
128 IWL_RATE_36M_PLCP = 11,
129 IWL_RATE_48M_PLCP = 1,
130 IWL_RATE_54M_PLCP = 3,
131 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
132 IWL_RATE_1M_PLCP = 10,
133 IWL_RATE_2M_PLCP = 20,
134 IWL_RATE_5M_PLCP = 55,
135 IWL_RATE_11M_PLCP = 110,
136 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
137};
138
139/* uCode API values for OFDM high-throughput (HT) bit rates */
140enum {
141 IWL_RATE_SISO_6M_PLCP = 0,
142 IWL_RATE_SISO_12M_PLCP = 1,
143 IWL_RATE_SISO_18M_PLCP = 2,
144 IWL_RATE_SISO_24M_PLCP = 3,
145 IWL_RATE_SISO_36M_PLCP = 4,
146 IWL_RATE_SISO_48M_PLCP = 5,
147 IWL_RATE_SISO_54M_PLCP = 6,
148 IWL_RATE_SISO_60M_PLCP = 7,
149 IWL_RATE_MIMO2_6M_PLCP = 0x8,
150 IWL_RATE_MIMO2_12M_PLCP = 0x9,
151 IWL_RATE_MIMO2_18M_PLCP = 0xa,
152 IWL_RATE_MIMO2_24M_PLCP = 0xb,
153 IWL_RATE_MIMO2_36M_PLCP = 0xc,
154 IWL_RATE_MIMO2_48M_PLCP = 0xd,
155 IWL_RATE_MIMO2_54M_PLCP = 0xe,
156 IWL_RATE_MIMO2_60M_PLCP = 0xf,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159};
160
161/* MAC header values for bit rates */
162enum {
163 IWL_RATE_6M_IEEE = 12,
164 IWL_RATE_9M_IEEE = 18,
165 IWL_RATE_12M_IEEE = 24,
166 IWL_RATE_18M_IEEE = 36,
167 IWL_RATE_24M_IEEE = 48,
168 IWL_RATE_36M_IEEE = 72,
169 IWL_RATE_48M_IEEE = 96,
170 IWL_RATE_54M_IEEE = 108,
171 IWL_RATE_60M_IEEE = 120,
172 IWL_RATE_1M_IEEE = 2,
173 IWL_RATE_2M_IEEE = 4,
174 IWL_RATE_5M_IEEE = 11,
175 IWL_RATE_11M_IEEE = 22,
176};
177
178#define IWL_CCK_BASIC_RATES_MASK \
179 (IWL_RATE_1M_MASK | \
180 IWL_RATE_2M_MASK)
181
182#define IWL_CCK_RATES_MASK \
183 (IWL_CCK_BASIC_RATES_MASK | \
184 IWL_RATE_5M_MASK | \
185 IWL_RATE_11M_MASK)
186
187#define IWL_OFDM_BASIC_RATES_MASK \
188 (IWL_RATE_6M_MASK | \
189 IWL_RATE_12M_MASK | \
190 IWL_RATE_24M_MASK)
191
192#define IWL_OFDM_RATES_MASK \
193 (IWL_OFDM_BASIC_RATES_MASK | \
194 IWL_RATE_9M_MASK | \
195 IWL_RATE_18M_MASK | \
196 IWL_RATE_36M_MASK | \
197 IWL_RATE_48M_MASK | \
198 IWL_RATE_54M_MASK)
199
200#define IWL_BASIC_RATES_MASK \
201 (IWL_OFDM_BASIC_RATES_MASK | \
202 IWL_CCK_BASIC_RATES_MASK)
203
204#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
205#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
206
207#define IWL_INVALID_VALUE -1
208
209#define IWL_MIN_RSSI_VAL -100
210#define IWL_MAX_RSSI_VAL 0
211
212/* These values specify how many Tx frame attempts before
213 * searching for a new modulation mode */
214#define IWL_LEGACY_FAILURE_LIMIT 160
215#define IWL_LEGACY_SUCCESS_LIMIT 480
216#define IWL_LEGACY_TABLE_COUNT 160
217
218#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
219#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
220#define IWL_NONE_LEGACY_TABLE_COUNT 1500
221
222/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
223#define IWL_RS_GOOD_RATIO 12800 /* 100% */
224#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
225#define IWL_RATE_HIGH_TH 10880 /* 85% */
226#define IWL_RATE_INCREASE_TH 6400 /* 50% */
227#define IWL_RATE_DECREASE_TH 1920 /* 15% */
228
229/* possible actions when in legacy mode */
230#define IWL_LEGACY_SWITCH_ANTENNA1 0
231#define IWL_LEGACY_SWITCH_ANTENNA2 1
232#define IWL_LEGACY_SWITCH_SISO 2
233#define IWL_LEGACY_SWITCH_MIMO2_AB 3
234#define IWL_LEGACY_SWITCH_MIMO2_AC 4
235#define IWL_LEGACY_SWITCH_MIMO2_BC 5
236
237/* possible actions when in siso mode */
238#define IWL_SISO_SWITCH_ANTENNA1 0
239#define IWL_SISO_SWITCH_ANTENNA2 1
240#define IWL_SISO_SWITCH_MIMO2_AB 2
241#define IWL_SISO_SWITCH_MIMO2_AC 3
242#define IWL_SISO_SWITCH_MIMO2_BC 4
243#define IWL_SISO_SWITCH_GI 5
244
245/* possible actions when in mimo mode */
246#define IWL_MIMO2_SWITCH_ANTENNA1 0
247#define IWL_MIMO2_SWITCH_ANTENNA2 1
248#define IWL_MIMO2_SWITCH_SISO_A 2
249#define IWL_MIMO2_SWITCH_SISO_B 3
250#define IWL_MIMO2_SWITCH_SISO_C 4
251#define IWL_MIMO2_SWITCH_GI 5
252
253#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
254
255#define IWL_ACTION_LIMIT 3 /* # possible actions */
256
257#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
258
259/* load per tid defines for A-MPDU activation */
260#define IWL_AGG_TPT_THREHOLD 0
261#define IWL_AGG_LOAD_THRESHOLD 10
262#define IWL_AGG_ALL_TID 0xff
263#define TID_QUEUE_CELL_SPACING 50 /*mS */
264#define TID_QUEUE_MAX_SIZE 20
265#define TID_ROUND_VALUE 5 /* mS */
266#define TID_MAX_LOAD_COUNT 8
267
268#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
269#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
270
271extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
272
273enum iwl_table_type {
274 LQ_NONE,
275 LQ_G, /* legacy types */
276 LQ_A,
277 LQ_SISO, /* high-throughput types */
278 LQ_MIMO2,
279 LQ_MAX,
280};
281
282#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
283#define is_siso(tbl) ((tbl) == LQ_SISO)
284#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
285#define is_mimo(tbl) (is_mimo2(tbl))
286#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
287#define is_a_band(tbl) ((tbl) == LQ_A)
288#define is_g_and(tbl) ((tbl) == LQ_G)
289
290#define ANT_NONE 0x0
291#define ANT_A BIT(0)
292#define ANT_B BIT(1)
293#define ANT_AB (ANT_A | ANT_B)
294#define ANT_C BIT(2)
295#define ANT_AC (ANT_A | ANT_C)
296#define ANT_BC (ANT_B | ANT_C)
297#define ANT_ABC (ANT_AB | ANT_C)
298
299#define IWL_MAX_MCS_DISPLAY_SIZE 12
300
301struct iwl_rate_mcs_info {
302 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
303 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
304};
305
306/**
307 * struct iwl_rate_scale_data -- tx success history for one rate
308 */
309struct iwl_rate_scale_data {
310 u64 data; /* bitmap of successful frames */
311 s32 success_counter; /* number of frames successful */
312 s32 success_ratio; /* per-cent * 128 */
313 s32 counter; /* number of frames attempted */
314 s32 average_tpt; /* success ratio * expected throughput */
315 unsigned long stamp;
316};
317
318/**
319 * struct iwl_scale_tbl_info -- tx params and success history for all rates
320 *
321 * There are two of these in struct iwl_lq_sta,
322 * one for "active", and one for "search".
323 */
324struct iwl_scale_tbl_info {
325 enum iwl_table_type lq_type;
326 u8 ant_type;
327 u8 is_SGI; /* 1 = short guard interval */
328 u8 is_ht40; /* 1 = 40 MHz channel width */
329 u8 is_dup; /* 1 = duplicated data streams */
330 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
 331	u8 max_search;	/* maximum number of tables we can search */
332 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
333 u32 current_rate; /* rate_n_flags, uCode API format */
334 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
335};
336
337struct iwl_traffic_load {
338 unsigned long time_stamp; /* age of the oldest statistics */
339 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
340 * slice */
341 u32 total; /* total num of packets during the
342 * last TID_MAX_TIME_DIFF */
 343	u8 queue_count;	/* number of queues that have
344 * been used since the last cleanup */
345 u8 head; /* start of the circular buffer */
346};
347
348/**
349 * struct iwl_lq_sta -- driver's rate scaling private structure
350 *
351 * Pointer to this gets passed back and forth between driver and mac80211.
352 */
353struct iwl_lq_sta {
354 u8 active_tbl; /* index of active table, range 0-1 */
355 u8 enable_counter; /* indicates HT mode */
356 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
357 u8 search_better_tbl; /* 1: currently trying alternate mode */
358 s32 last_tpt;
359
360 /* The following determine when to search for a new mode */
361 u32 table_count_limit;
362 u32 max_failure_limit; /* # failed frames before new search */
363 u32 max_success_limit; /* # successful frames before new search */
364 u32 table_count;
365 u32 total_failed; /* total failed frames, any/all rates */
366 u32 total_success; /* total successful frames, any/all rates */
367 u64 flush_timer; /* time staying in mode before new search */
368
369 u8 action_counter; /* # mode-switch actions tried */
370 u8 is_green;
371 u8 is_dup;
372 enum ieee80211_band band;
373
374 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
375 u32 supp_rates;
376 u16 active_legacy_rate;
377 u16 active_siso_rate;
378 u16 active_mimo2_rate;
379 s8 max_rate_idx; /* Max rate set by user */
380 u8 missed_rate_counter;
381
382 struct iwl_link_quality_cmd lq;
383 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
384 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
385 u8 tx_agg_tid_en;
386#ifdef CONFIG_MAC80211_DEBUGFS
387 struct dentry *rs_sta_dbgfs_scale_table_file;
388 struct dentry *rs_sta_dbgfs_stats_table_file;
389 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
390 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
391 u32 dbg_fixed_rate;
392#endif
393 struct iwl_priv *drv;
394
395 /* used to be in sta_info */
396 int last_txrate_idx;
397 /* last tx rate_n_flags */
398 u32 last_rate_n_flags;
399 /* packets destined for this STA are aggregated */
400 u8 is_agg;
401};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
420/**
421 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
422 *
423 * The specific throughput table used is based on the type of network
 424 * the device is associated with, including A, B, G, and G w/ TGG protection
425 */
426extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
427
428/* Initialize station's rate scaling information after adding station */
429extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
430 struct ieee80211_sta *sta, u8 sta_id);
431extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
432 struct ieee80211_sta *sta, u8 sta_id);
433
434/**
435 * iwl_rate_control_register - Register the rate control algorithm callbacks
436 *
437 * Since the rate control algorithm is hardware specific, there is no need
438 * or reason to place it as a stand alone module. The driver can call
439 * iwl_rate_control_register in order to register the rate control callbacks
440 * with the mac80211 subsystem. This should be performed prior to calling
441 * ieee80211_register_hw
442 *
443 */
444extern int iwl4965_rate_control_register(void);
445extern int iwl3945_rate_control_register(void);
446
447/**
448 * iwl_rate_control_unregister - Unregister the rate control callbacks
449 *
450 * This should be called after calling ieee80211_unregister_hw, but before
451 * the driver is unloaded.
452 */
453extern void iwl4965_rate_control_unregister(void);
454extern void iwl3945_rate_control_unregister(void);
455
456#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644
index 000000000000..903ef0d6d6cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.c
@@ -0,0 +1,165 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttle (levels 3 through 5)
56 */
57
58struct iwl_power_vec_entry {
59 struct iwl_powertable_cmd cmd;
60 u8 no_dtim; /* number of skip dtim */
61};
62
63static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
64 struct iwl_powertable_cmd *cmd)
65{
66 memset(cmd, 0, sizeof(*cmd));
67
68 if (priv->power_data.pci_pm)
69 cmd->flags |= IWL_POWER_PCI_PM_MSK;
70
71 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
72}
73
74static int
75iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
76{
77 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
78 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
79 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
80 le32_to_cpu(cmd->tx_data_timeout));
81 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
82 le32_to_cpu(cmd->rx_data_timeout));
83 IWL_DEBUG_POWER(priv,
84 "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
85 le32_to_cpu(cmd->sleep_interval[0]),
86 le32_to_cpu(cmd->sleep_interval[1]),
87 le32_to_cpu(cmd->sleep_interval[2]),
88 le32_to_cpu(cmd->sleep_interval[3]),
89 le32_to_cpu(cmd->sleep_interval[4]));
90
91 return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
92 sizeof(struct iwl_powertable_cmd), cmd);
93}
94
95int
96iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
97 bool force)
98{
99 int ret;
100 bool update_chains;
101
102 lockdep_assert_held(&priv->mutex);
103
104 /* Don't update the RX chain when chain noise calibration is running */
105 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
106 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
107
108 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
109 return 0;
110
111 if (!iwl_legacy_is_ready_rf(priv))
112 return -EIO;
113
114 /* scan complete use sleep_power_next, need to be updated */
115 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
116 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
117 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
118 return 0;
119 }
120
121 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
122 set_bit(STATUS_POWER_PMI, &priv->status);
123
124 ret = iwl_legacy_set_power(priv, cmd);
125 if (!ret) {
126 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
127 clear_bit(STATUS_POWER_PMI, &priv->status);
128
129 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
130 priv->cfg->ops->lib->update_chain_flags(priv);
131 else if (priv->cfg->ops->lib->update_chain_flags)
132 IWL_DEBUG_POWER(priv,
133 "Cannot update the power, chain noise "
134 "calibration running: %d\n",
135 priv->chain_noise_data.state);
136
137 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
138 } else
139 IWL_ERR(priv, "set power fail, ret = %d", ret);
140
141 return ret;
142}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
153/* initialize to default */
154void iwl_legacy_power_initialize(struct iwl_priv *priv)
155{
156 u16 lctl = iwl_legacy_pcie_link_ctl(priv);
157
158 priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
159
160 priv->power_data.debug_sleep_level_override = -1;
161
162 memset(&priv->power_data.sleep_cmd, 0,
163 sizeof(priv->power_data.sleep_cmd));
164}
165EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644
index 000000000000..d30b36acdc4a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.h
@@ -0,0 +1,55 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_legacy_power_setting_h__
29#define __iwl_legacy_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool pci_pm;
47};
48
49int
50iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
51 bool force);
52int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
53void iwl_legacy_power_initialize(struct iwl_priv *priv);
54
55#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644
index 000000000000..30a493003ab0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-prph.h
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_prph_h__
64#define __iwl_legacy_prph_h__
65
66/*
67 * Registers in this file are internal, not PCI bus memory mapped.
68 * Driver accesses these via HBUS_TARG_PRPH_* registers.
69 */
70#define PRPH_BASE (0x00000)
71#define PRPH_END (0xFFFFF)
72
73/* APMG (power management) constants */
74#define APMG_BASE (PRPH_BASE + 0x3000)
75#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
76#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
77#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
78#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
79#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
80#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
81#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
82#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
83#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
84#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
85
86#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
87#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
88#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
89
90#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
100
101/**
102 * BSM (Bootstrap State Machine)
103 *
104 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
105 * in special SRAM that does not power down when the embedded control
106 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
107 *
108 * When powering back up after sleeps (or during initial uCode load), the BSM
109 * internally loads the short bootstrap program from the special SRAM into the
110 * embedded processor's instruction SRAM, and starts the processor so it runs
111 * the bootstrap program.
112 *
113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
114 * images for a uCode program from host DRAM locations. The host driver
115 * indicates DRAM locations and sizes for instruction and data images via the
116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
117 * the new program starts automatically.
118 *
119 * The uCode used for open-source drivers includes two programs:
120 *
121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver.
127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it
130 * is ready to be used.
131 *
132 * When initializing the NIC, the host driver does the following procedure:
133 *
134 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
135 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
136 *
137 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
138 * images in host DRAM.
139 *
140 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
141 * BSM_WR_MEM_SRC_REG = 0
142 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
143 * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image
144 *
145 * 4) Load bootstrap into instruction SRAM:
146 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
147 *
148 * 5) Wait for load completion:
149 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
150 *
151 * 6) Enable future boot loads whenever NIC's power management triggers it:
152 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
153 *
154 * 7) Start the NIC by removing all reset bits:
155 * CSR_RESET = 0
156 *
157 * The bootstrap uCode (already in instruction SRAM) loads initialization
158 * uCode. Initialization uCode performs data initialization, sends
159 * "initialize alive" notification to host, and waits for a signal from
160 * host to load runtime code.
161 *
162 * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
163 * images in host DRAM. The last register loaded must be the instruction
164 * byte count register ("1" in MSbit tells initialization uCode to load
165 * the runtime uCode):
166 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
167 *
168 * 5) Wait for "alive" notification, then issue normal runtime commands.
169 *
170 * Data caching during power-downs:
171 *
172 * Just before the embedded controller powers down (e.g for automatic
173 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
174 * a current snapshot of the embedded processor's data SRAM into host DRAM.
175 * This caches the data while the embedded processor's memory is powered down.
176 * Location and size are controlled by BSM_DRAM_DATA_* registers.
177 *
178 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
179 * change during operation; the original image (from uCode distribution
180 * file) can be used for reload.
181 *
182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
183 * at the BSM_DRAM_* registers, which now point to the runtime instruction
184 * image and the cached (modified) runtime data (*not* the initialization
185 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
186 * uCode from where it left off before the power-down.
187 *
188 * NOTE: Initialization uCode does *not* run as part of the save/restore
189 * procedure.
190 *
191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory).
195 *
196 * Note that, during normal operation, the host DRAM that held the initial
197 * startup data for the runtime code is now being used as a backup data cache
198 * for modified data! If you need to completely re-initialize the NIC, make
199 * sure that you use the runtime data image from the uCode distribution file,
200 * not the modified/saved runtime data. You may want to store a separate
201 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
202 */
203
204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208
209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800)
212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218
219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
221 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
222 * 4965 pointers use bits 35:4 of DRAM address.
223 */
224#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
225#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
226#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
227#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
228
229/*
230 * BSM special memory, stays powered on during power-save sleeps.
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */
235
236
237/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
239#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
240#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
241#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
242#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
243#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
244#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
245#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
246
247/**
248 * Tx Scheduler
249 *
250 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
251 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
252 * host DRAM. It steers each frame's Tx command (which contains the frame
253 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues.
256 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 *
260 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority
262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- unused (HCCA)
266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only)
268 *
269 *
270 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
271 * In addition, driver can map the remaining queues to Tx DMA/FIFO
272 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
273 *
274 * The driver sets up each queue to work in one of two modes:
275 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station.
281 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order).
288 *
289 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation.
292 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
294 * The device may automatically retry Tx, but will retry only one frame
295 * at a time, until receiving ACK from receiving station, or reaching
296 * retry limit and giving up.
297 *
298 * The command queue (#4/#9) must use this mode!
299 * This mode does not require use of the Byte Count table in host DRAM.
300 *
301 * Driver controls scheduler operation via 3 means:
302 * 1) Scheduler registers
 303 * 2) Shared scheduler data base in internal 4965 SRAM
304 * 3) Shared data in host DRAM
305 *
306 * Initialization:
307 *
308 * When loading, driver should allocate memory for:
309 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
310 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
311 * (1024 bytes for each queue).
312 *
313 * After receiving "Alive" response from uCode, driver must initialize
314 * the scheduler (especially for queue #4/#9, the command queue, otherwise
315 * the driver can't issue commands!):
316 */
317
318/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */
325#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64
327
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00
330
331/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode.
334 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
336
337/*
338 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening).
341 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver.
345 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
347
348/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs).
350 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes.
 353 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
359
360/*
361 * Enables any/all Tx DMA/FIFO channels.
362 * Scheduler generates requests for only the active channels.
363 * Set this to 0xff to enable all 8 channels (normal usage).
364 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
368/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376
377/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window.
381 * Initialized by driver, updated by scheduler.
382 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384
385/*
386 * Select which queues work in chain mode (1) vs. not (0).
387 * Use chain mode to build chains of aggregated frames.
388 * Bit fields:
389 * 31-16: Reserved
390 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
 391 * NOTE:  If a driver sets up a queue for chain mode, it should also be set
 392 *        up in Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
393 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
395
396/*
397 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index).
399 * Bit fields:
400 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues.
404 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
406
407/*
408 * Queue search status registers. One for each queue.
409 * Sets up queue mode and assigns queue to Tx DMA channel.
410 * Bit fields:
411 * 19-10: Write mask/enable bits for bits 0-9
412 * 9: Driver should init to "0"
413 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
414 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0).
421 * Other bits should be written as "0"
422 *
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL.
425 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428
429/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434
435/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
438
439/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ...
441 *
442 * Driver should clear and initialize the following areas after receiving
443 * "Alive" response from 4965 uCode, i.e. after initial
444 * uCode load, or after a uCode load done for error recovery:
445 *
446 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
447 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
448 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
449 *
450 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
451 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
452 * All OFFSET values must be added to this base address.
453 */
454
455/*
456 * Queue context. One 8-byte entry for each of 16 queues.
457 *
458 * Driver should clear this entire area (size 0x80) to 0 after receiving
459 * "Alive" notification from uCode. Additionally, driver should init
460 * each queue's entry as follows:
461 *
462 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
464 *
465 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa).
467 *
468 * Driver should init all other bits to 0.
469 *
470 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation.
472 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
481
482/*
483 * Tx Status Bitmap
484 *
485 * Driver should clear this entire area (size 0x100) to 0 after receiving
486 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver.
488 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490
491/*
492 * RAxTID to queue translation mapping.
493 *
 494 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value.
500 *
501 * Bit fields, for each 16-bit map:
502 * 15-9: Reserved, set to 0
503 * 8-4: Index into device's station table for recipient station
504 * 3-0: Traffic ID (tid), range 0-15
505 *
506 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
507 * "Alive" notification from uCode. To update a 16-bit map value, driver
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM.
510 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512
513/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516
517#define IWL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
520
521/*********************** END TX SCHEDULER *************************************/
522
523#endif /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644
index 000000000000..654cf233a384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -0,0 +1,302 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <net/mac80211.h>
33#include <asm/unaligned.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_legacy_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
106
107/**
108 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
122
/**
 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Publishes the driver's Rx write index (rounded down to a multiple of 8)
 * to the device register named by priv->hw_params.rx_wrt_ptr_reg.
 * No-op unless q->need_update is set. Takes q->lock internally.
 */
void
iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
					struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	/* Nothing restocked since the last register write */
	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			/* Device asleep: request MAC access and bail out.
			 * need_update stays set, so a later call retries
			 * the write once the device has woken up. */
			IWL_DEBUG_INFO(priv,
				"Rx queue requesting wakeup,"
				" GP1 = 0x%x\n", reg);
			iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		/* Device expects the write index as a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
				q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
				q->write_actual);
	}

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
170
171int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
172{
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 struct device *dev = &priv->pci_dev->dev;
175 int i;
176
177 spin_lock_init(&rxq->lock);
178 INIT_LIST_HEAD(&rxq->rx_free);
179 INIT_LIST_HEAD(&rxq->rx_used);
180
181 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
182 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
183 GFP_KERNEL);
184 if (!rxq->bd)
185 goto err_bd;
186
187 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
188 &rxq->rb_stts_dma, GFP_KERNEL);
189 if (!rxq->rb_stts)
190 goto err_rb;
191
192 /* Fill the rx_used queue with _all_ of the Rx buffers */
193 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
194 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
195
196 /* Set us so that we have processed and used all buffers, but have
197 * not restocked the Rx queue with fresh buffers */
198 rxq->read = rxq->write = 0;
199 rxq->write_actual = 0;
200 rxq->free_count = 0;
201 rxq->need_update = 0;
202 return 0;
203
204err_rb:
205 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
206 rxq->bd_dma);
207err_bd:
208 return -ENOMEM;
209}
210EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
211
212
213void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb)
215{
216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
217 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
218
219 if (!report->state) {
220 IWL_DEBUG_11H(priv,
221 "Spectrum Measure Notification: Start\n");
222 return;
223 }
224
225 memcpy(&priv->measure_report, report, sizeof(*report));
226 priv->measurement_status |= MEASUREMENT_READY;
227}
228EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
229
230void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
231 struct iwl_rx_packet *pkt)
232{
233 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
234 return;
235 if (iwl_legacy_is_any_associated(priv)) {
236 if (priv->cfg->ops->lib->check_plcp_health) {
237 if (!priv->cfg->ops->lib->check_plcp_health(
238 priv, pkt)) {
239 /*
240 * high plcp error detected
241 * reset Radio
242 */
243 iwl_legacy_force_reset(priv,
244 IWL_RF_RESET, false);
245 }
246 }
247 }
248}
249EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
250
/*
 * iwl_legacy_set_decrypted_flag - interpret the device's decryption result
 *
 * Examines the security-type and decrypt-status bits the device attached
 * to a received protected frame. On successful hardware decryption, sets
 * RX_FLAG_DECRYPTED in @stats so mac80211 skips software decryption.
 *
 * NOTE: the switch below relies on intentional case fallthrough
 * (TKIP -> WEP -> CCMP) to share the ICV/MIC and decrypt-OK checks.
 *
 * returns non-zero if packet should be dropped
 */
int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
			struct ieee80211_hdr *hdr,
			u32 decrypt_res,
			struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
						RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames carry no decryption status to interpret */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
				RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TKIP shares the bad-ICV/MIC check below */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
				RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through - decrypt-OK check is common with CCMP */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
				RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644
index 000000000000..60f597f796ca
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -0,0 +1,625 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
41 * sending probe req. This should be set long enough to hear probe responses
42 * from more than one AP. */
43#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
44#define IWL_ACTIVE_DWELL_TIME_52 (20)
45
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48
49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
50 * Must be set longer than active dwell time.
51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
52#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
53#define IWL_PASSIVE_DWELL_TIME_52 (10)
54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5
56
/*
 * iwl_legacy_send_scan_abort - synchronously send REPLY_SCAN_ABORT_CMD
 *
 * Returns 0 when the device accepted the abort; -EIO when the device is
 * in no state to abort (not ready, no HW scan active, FW error, exit
 * pending) or when it replied that it could not abort; otherwise the
 * error from the synchronous command submission.
 */
static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
{
	int ret;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(STATUS_READY, &priv->status) ||
	    !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
	    test_bit(STATUS_FW_ERROR, &priv->status) ||
	    test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EIO;

	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		return ret;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure". A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	/* reply page was handed to us because of CMD_WANT_SKB; release it */
	iwl_legacy_free_pages(priv, cmd.reply_page);
	return ret;
}
95
96static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
97{
98 /* check if scan was requested from mac80211 */
99 if (priv->scan_request) {
100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
102 }
103
104 priv->is_internal_short_scan = false;
105 priv->scan_vif = NULL;
106 priv->scan_request = NULL;
107}
108
/*
 * iwl_legacy_force_scan_end - unconditionally tear down scan state
 *
 * Clears all scan-related status bits and completes the scan toward
 * mac80211 as aborted, without talking to the device. Used when the
 * device cannot (or failed to) abort the scan on its own.
 * Caller must hold priv->mutex (asserted below).
 */
void iwl_legacy_force_scan_end(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->mutex);

	if (!test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
		return;
	}

	IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
	clear_bit(STATUS_SCANNING, &priv->status);
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	/* report aborted completion and clear scan_request/scan_vif */
	iwl_legacy_complete_scan(priv, true);
}
124
125static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
126{
127 int ret;
128
129 lockdep_assert_held(&priv->mutex);
130
131 if (!test_bit(STATUS_SCANNING, &priv->status)) {
132 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
133 return;
134 }
135
136 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
137 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
138 return;
139 }
140
141 ret = iwl_legacy_send_scan_abort(priv);
142 if (ret) {
143 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
144 iwl_legacy_force_scan_end(priv);
145 } else
146 IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n");
147}
148
/**
 * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
 *
 * Asynchronous: only queues the abort_scan work item on priv->workqueue;
 * the actual abort runs later in that work handler. Always returns 0.
 */
int iwl_legacy_scan_cancel(struct iwl_priv *priv)
{
	IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
	queue_work(priv->workqueue, &priv->abort_scan);
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel);
159
/**
 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 *
 * Synchronously requests a scan abort, then polls STATUS_SCAN_HW every
 * 20 ms until it clears or @ms elapses.  Note this sleeps while holding
 * priv->mutex.
 *
 * Returns non-zero if the hardware is still scanning when the timeout
 * expires, 0 otherwise.  Caller must hold priv->mutex.
 */
int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&priv->mutex);

	IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");

	iwl_legacy_do_scan_abort(priv);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(STATUS_SCAN_HW, &priv->status))
			break;
		msleep(20);
	}

	return test_bit(STATUS_SCAN_HW, &priv->status);
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
184
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	/* Debug-only handler: simply logs the status field of the
	 * scan request notification; no state is changed. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanreq_notification *notif =
	    (struct iwl_scanreq_notification *)pkt->u.raw;

	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
}
197
/* Service SCAN_START_NOTIFICATION (0x82) */
static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanstart_notification *notif =
	    (struct iwl_scanstart_notification *)pkt->u.raw;
	/* Remember the TSF at scan start so later result notifications
	 * can report elapsed time (see the results handler below). */
	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	IWL_DEBUG_SCAN(priv, "Scan start: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       notif->status, notif->beacon_timer);
}
215
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	/* Debug-only handler: logs per-channel scan results including
	 * time elapsed since scan start (scan_start_tsf). */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanresults_notification *notif =
	    (struct iwl_scanresults_notification *)pkt->u.raw;

	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       le32_to_cpu(notif->statistics[0]),
		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
}
237
238/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
239static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
240 struct iwl_rx_mem_buffer *rxb)
241{
242
243#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
244 struct iwl_rx_packet *pkt = rxb_addr(rxb);
245 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
246#endif
247
248 IWL_DEBUG_SCAN(priv,
249 "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
250 scan_notif->scanned_channels,
251 scan_notif->tsf_low,
252 scan_notif->tsf_high, scan_notif->status);
253
254 /* The HW is no longer scanning */
255 clear_bit(STATUS_SCAN_HW, &priv->status);
256
257 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
258 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
259 jiffies_to_msecs(jiffies - priv->scan_start));
260
261 queue_work(priv->workqueue, &priv->scan_completed);
262}
263
/*
 * iwl_legacy_setup_rx_scan_handlers - register RX handlers for the four
 * scan-related firmware notifications (0x80, 0x82, 0x83, 0x84).
 */
void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
{
	/* scan handlers */
	priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
	priv->rx_handlers[SCAN_START_NOTIFICATION] =
					iwl_legacy_rx_scan_start_notif;
	priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
					iwl_legacy_rx_scan_results_notif;
	priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
					iwl_legacy_rx_scan_complete_notif;
}
EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
276
277inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
278 enum ieee80211_band band,
279 u8 n_probes)
280{
281 if (band == IEEE80211_BAND_5GHZ)
282 return IWL_ACTIVE_DWELL_TIME_52 +
283 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
284 else
285 return IWL_ACTIVE_DWELL_TIME_24 +
286 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
287}
288EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
289
/*
 * iwl_legacy_get_passive_dwell_time - passive-scan dwell time for a band
 *
 * Starts from the per-band base value; when associated, clamps the
 * result so a passive dwell never exceeds 98% of the smallest beacon
 * interval across contexts (minus 2 * channel tune time), keeping us
 * able to return to the operating channel in time.
 */
u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
			       enum ieee80211_band band,
			       struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx;
	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;

	if (iwl_legacy_is_any_associated(priv)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		for_each_context(priv, ctx) {
			u16 value;

			if (!iwl_legacy_is_associated_ctx(ctx))
				continue;
			/* 0 or over-large beacon interval: fall back to base */
			value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
			if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
				value = IWL_PASSIVE_DWELL_BASE;
			value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
			passive = min(value, passive);
		}
	}

	return passive;
}
EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
321
/*
 * iwl_legacy_init_scan_params - default the per-band scan TX antenna
 *
 * Picks the highest valid TX antenna index (fls of the valid_tx_ant
 * bitmap, minus one) for each band that has no antenna set yet.
 */
void iwl_legacy_init_scan_params(struct iwl_priv *priv)
{
	u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
	if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
		priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
	if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
		priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(iwl_legacy_init_scan_params);
331
/*
 * iwl_legacy_scan_initiate - start a HW scan (regular or internal)
 * @vif:      interface the scan is for (NULL for internal scans)
 * @internal: true for the driver-triggered internal short scan
 * @band:     band to scan
 *
 * Sets STATUS_SCANNING, dispatches the scan through the per-device
 * request_scan op and arms the scan_check watchdog.
 *
 * Returns 0 on success; -EOPNOTSUPP if no request_scan op, -EIO if the
 * radio is not ready, -EBUSY if a scan or abort is already in flight,
 * or the request_scan op's error.  Caller must hold priv->mutex.
 */
static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
					  struct ieee80211_vif *vif,
					  bool internal,
					  enum ieee80211_band band)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (WARN_ON(!priv->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	/* restart the watchdog below if the scan actually starts */
	cancel_delayed_work(&priv->scan_check);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_SCAN(priv,
			"Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
		return -EBUSY;
	}

	IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
			internal ? "internal short " : "");

	set_bit(STATUS_SCANNING, &priv->status);
	priv->is_internal_short_scan = internal;
	priv->scan_start = jiffies;
	priv->scan_band = band;

	ret = priv->cfg->ops->utils->request_scan(priv, vif);
	if (ret) {
		/* roll back the state set above */
		clear_bit(STATUS_SCANNING, &priv->status);
		priv->is_internal_short_scan = false;
		return ret;
	}

	/* watchdog: force scan end if firmware never reports completion */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return 0;
}
382
/*
 * iwl_legacy_mac_hw_scan - mac80211 hw_scan callback
 *
 * Stores the request and vif, then starts the scan unless an internal
 * short scan is already running, in which case the pending request is
 * picked up when the internal scan completes (see
 * iwl_legacy_bg_scan_completed).
 *
 * Returns 0 on success, -EINVAL for an empty channel list, -EAGAIN if
 * a (non-internal) scan is already in progress, or the error from
 * iwl_legacy_scan_initiate().
 */
int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
		    struct ieee80211_vif *vif,
		    struct cfg80211_scan_request *req)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (req->n_channels == 0)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_SCANNING, &priv->status) &&
	    !priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	priv->scan_request = req;
	priv->scan_vif = vif;

	/*
	 * If an internal scan is in progress, just set
	 * up the scan_request as per above.
	 */
	if (priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
		ret = 0;
	} else
		ret = iwl_legacy_scan_initiate(priv, vif, false,
					req->channels[0]->band);

	IWL_DEBUG_MAC80211(priv, "leave\n");

out_unlock:
	mutex_unlock(&priv->mutex);

	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
427
/*
 * internal short scan, this function should only been called while associated.
 * It will reset and tune the radio to prevent possible RF related problem
 */
void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
{
	/* asynchronous: the actual scan start happens in
	 * iwl_legacy_bg_start_internal_scan() */
	queue_work(priv->workqueue, &priv->start_internal_scan);
}
436
437static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
438{
439 struct iwl_priv *priv =
440 container_of(work, struct iwl_priv, start_internal_scan);
441
442 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
443
444 mutex_lock(&priv->mutex);
445
446 if (priv->is_internal_short_scan == true) {
447 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
448 goto unlock;
449 }
450
451 if (test_bit(STATUS_SCANNING, &priv->status)) {
452 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
453 goto unlock;
454 }
455
456 if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
457 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
458 unlock:
459 mutex_unlock(&priv->mutex);
460}
461
/*
 * iwl_legacy_bg_scan_check - scan watchdog work handler
 *
 * Runs when the firmware failed to report scan completion within
 * IWL_SCAN_CHECK_WATCHDOG; forces the scan to end without attempting
 * a firmware abort.
 */
static void iwl_legacy_bg_scan_check(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, scan_check.work);

	IWL_DEBUG_SCAN(priv, "Scan check work\n");

	/* Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211 */
	mutex_lock(&priv->mutex);
	iwl_legacy_force_scan_end(priv);
	mutex_unlock(&priv->mutex);
}
476
/**
 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
 * @frame:  buffer to fill (caller-allocated, at least @left bytes)
 * @ta:     transmitter address for the SA field
 * @ies:    extra IEs to append after the SSID IE (may be NULL)
 * @ie_len: length of @ies in bytes
 * @left:   space available in @frame
 *
 * Builds a broadcast probe request: 24-byte management header, a
 * zero-length (wildcard) SSID IE, then @ies.
 *
 * Returns the number of bytes written, or 0 if @left is too small for
 * the header plus the SSID IE.  Note a truncating WARN_ON returns the
 * partial length written so far.
 */

u16
iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
		   const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, ta, ETH_ALEN);
	memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	/* wildcard SSID: element ID + zero length, no payload */
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16)len;
}
EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
525
/*
 * iwl_legacy_bg_abort_scan - work handler for iwl_legacy_scan_cancel()
 *
 * Aborts the scan with a 200 ms completion timeout.
 */
static void iwl_legacy_bg_abort_scan(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);

	IWL_DEBUG_SCAN(priv, "Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&priv->mutex);
	iwl_legacy_scan_cancel_timeout(priv, 200);
	mutex_unlock(&priv->mutex);
}
538
/*
 * iwl_legacy_bg_scan_completed - work handler run when a scan finishes
 *
 * Clears STATUS_SCAN_ABORTING / STATUS_SCANNING, completes the scan
 * toward mac80211, and — if a mac80211 request arrived while an
 * internal scan was running — starts the pending scan instead of
 * completing.  Finally re-applies deferred power / TX-power settings
 * and calls the per-device post_scan hook.
 */
static void iwl_legacy_bg_scan_completed(struct work_struct *work)
{
	struct iwl_priv *priv =
	    container_of(work, struct iwl_priv, scan_completed);
	bool aborted;

	IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
		       priv->is_internal_short_scan ? "internal short " : "");

	cancel_delayed_work(&priv->scan_check);

	mutex_lock(&priv->mutex);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		/* scan was already force-ended; skip completion */
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	if (priv->is_internal_short_scan && !aborted) {
		int err;

		/* Check if mac80211 requested scan during our internal scan */
		if (priv->scan_request == NULL)
			goto out_complete;

		/* If so request a new scan */
		err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
					priv->scan_request->channels[0]->band);
		if (err) {
			IWL_DEBUG_SCAN(priv,
				"failed to initiate pending scan: %d\n", err);
			aborted = true;
			goto out_complete;
		}

		/* a new scan is running: defer completion and settings */
		goto out;
	}

out_complete:
	iwl_legacy_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_legacy_is_ready_rf(priv))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
				  false);
	iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);

	priv->cfg->ops->utils->post_scan(priv);

out:
	mutex_unlock(&priv->mutex);
}
602
/*
 * iwl_legacy_setup_scan_deferred_work - initialize all scan work items
 * (completion, abort, internal-scan start and the scan_check watchdog).
 */
void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
	INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
	INIT_WORK(&priv->start_internal_scan,
		  iwl_legacy_bg_start_internal_scan);
	INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
}
EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
612
/*
 * iwl_legacy_cancel_scan_deferred_work - synchronously cancel all scan
 * work items; if the watchdog was still pending, force the scan to end.
 */
void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->start_internal_scan);
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->scan_completed);

	if (cancel_delayed_work_sync(&priv->scan_check)) {
		/* watchdog never fired: clean up scan state ourselves */
		mutex_lock(&priv->mutex);
		iwl_legacy_force_scan_end(priv);
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644
index 000000000000..9f70a4723103
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
#ifndef __iwl_legacy_spectrum_h__
#define __iwl_legacy_spectrum_h__

/*
 * Wire formats for IEEE 802.11 spectrum-measurement request/report
 * elements.  All multi-byte fields are little-endian on the wire,
 * hence the __le* types and __packed layout.
 */

enum {	/* ieee80211_basic_report.map */
	IEEE80211_BASIC_MAP_BSS = (1 << 0),
	IEEE80211_BASIC_MAP_OFDM = (1 << 1),
	IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
	IEEE80211_BASIC_MAP_RADAR = (1 << 3),
	IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
	/* Bits 5-7 are reserved */

};

/* Basic measurement report body */
struct ieee80211_basic_report {
	u8 channel;
	__le64 start_time;
	__le16 duration;
	u8 map;			/* bitmap of IEEE80211_BASIC_MAP_* */
} __packed;

enum {	/* ieee80211_measurement_request.mode */
	/* Bit 0 is reserved */
	IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
	IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
	IEEE80211_MEASUREMENT_REPORT = (1 << 3),
	/* Bits 4-7 are reserved */
};

enum {	/* ieee80211_measurement_request/report.type */
	IEEE80211_REPORT_BASIC = 0,	/* required */
	IEEE80211_REPORT_CCA = 1,	/* optional */
	IEEE80211_REPORT_RPI = 2,	/* optional */
	/* 3-255 reserved */
};

struct ieee80211_measurement_params {
	u8 channel;
	__le64 start_time;
	__le16 duration;
} __packed;

/* Generic information-element header: id, length, variable payload */
struct ieee80211_info_element {
	u8 id;
	u8 len;
	u8 data[0];		/* len bytes of payload follow */
} __packed;

struct ieee80211_measurement_request {
	struct ieee80211_info_element ie;
	u8 token;
	u8 mode;		/* IEEE80211_MEASUREMENT_* bits */
	u8 type;		/* IEEE80211_REPORT_* */
	struct ieee80211_measurement_params params[0];
} __packed;

struct ieee80211_measurement_report {
	struct ieee80211_info_element ie;
	u8 token;
	u8 mode;		/* IEEE80211_MEASUREMENT_* bits */
	u8 type;		/* IEEE80211_REPORT_* */
	union {
		struct ieee80211_basic_report basic[0];
	} u;
} __packed;

#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644
index 000000000000..47c9da3834ea
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -0,0 +1,816 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/lockdep.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38
39/* priv->sta_lock must be held */
40static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
41{
42
43 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
44 IWL_ERR(priv,
45 "ACTIVATE a non DRIVER active station id %u addr %pM\n",
46 sta_id, priv->stations[sta_id].sta.sta.addr);
47
48 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
49 IWL_DEBUG_ASSOC(priv,
50 "STA id %u addr %pM already present"
51 " in uCode (according to driver)\n",
52 sta_id, priv->stations[sta_id].sta.sta.addr);
53 } else {
54 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
55 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
56 sta_id, priv->stations[sta_id].sta.sta.addr);
57 }
58}
59
/*
 * iwl_legacy_process_add_sta_resp - handle the REPLY_ADD_STA response
 * @addsta: the ADD_STA command that was sent (echoed in the response)
 * @pkt:    response packet from the device
 * @sync:   true for synchronous commands (currently informational only)
 *
 * On ADD_STA_SUCCESS_MSK, marks the station uCode-active under
 * priv->sta_lock.  Returns 0 on success, -EIO on any failure status.
 */
static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
				    struct iwl_legacy_addsta_cmd *addsta,
				    struct iwl_rx_packet *pkt,
				    bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
			pkt->hdr.flags);
		return ret;
	}

	IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
		       sta_id);

	spin_lock_irqsave(&priv->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
		iwl_legacy_sta_ucode_activate(priv, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TABLE:
		IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
			sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IWL_ERR(priv,
			"Adding station %d failed, no block ack resource.\n",
			sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
			sta_id);
		break;
	default:
		IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
				pkt->u.add_sta.status);
		break;
	}

	IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
		       priv->stations[sta_id].sta.mode ==
		       STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
		       sta_id, priv->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC adress
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
		       priv->stations[sta_id].sta.mode ==
		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
		       addsta->sta.addr);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
126
127static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
128 struct iwl_device_cmd *cmd,
129 struct iwl_rx_packet *pkt)
130{
131 struct iwl_legacy_addsta_cmd *addsta =
132 (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
133
134 iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
135
136}
137
138int iwl_legacy_send_add_sta(struct iwl_priv *priv,
139 struct iwl_legacy_addsta_cmd *sta, u8 flags)
140{
141 struct iwl_rx_packet *pkt = NULL;
142 int ret = 0;
143 u8 data[sizeof(*sta)];
144 struct iwl_host_cmd cmd = {
145 .id = REPLY_ADD_STA,
146 .flags = flags,
147 .data = data,
148 };
149 u8 sta_id __maybe_unused = sta->sta.sta_id;
150
151 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
153
154 if (flags & CMD_ASYNC)
155 cmd.callback = iwl_legacy_add_sta_callback;
156 else {
157 cmd.flags |= CMD_WANT_SKB;
158 might_sleep();
159 }
160
161 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
162 ret = iwl_legacy_send_cmd(priv, &cmd);
163
164 if (ret || (flags & CMD_ASYNC))
165 return ret;
166
167 if (ret == 0) {
168 pkt = (struct iwl_rx_packet *)cmd.reply_page;
169 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
170 }
171 iwl_legacy_free_pages(priv, cmd.reply_page);
172
173 return ret;
174}
175EXPORT_SYMBOL(iwl_legacy_send_add_sta);
176
177static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
178 struct ieee80211_sta *sta,
179 struct iwl_rxon_context *ctx)
180{
181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
182 __le32 sta_flags;
183 u8 mimo_ps_mode;
184
185 if (!sta || !sta_ht_inf->ht_supported)
186 goto done;
187
188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
189 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
191 "static" :
192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
193 "dynamic" : "disabled");
194
195 sta_flags = priv->stations[index].sta.station_flags;
196
197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
198
199 switch (mimo_ps_mode) {
200 case WLAN_HT_CAP_SM_PS_STATIC:
201 sta_flags |= STA_FLG_MIMO_DIS_MSK;
202 break;
203 case WLAN_HT_CAP_SM_PS_DYNAMIC:
204 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
205 break;
206 case WLAN_HT_CAP_SM_PS_DISABLED:
207 break;
208 default:
209 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
210 break;
211 }
212
213 sta_flags |= cpu_to_le32(
214 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
215
216 sta_flags |= cpu_to_le32(
217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
218
219 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
220 sta_flags |= STA_FLG_HT40_EN_MSK;
221 else
222 sta_flags &= ~STA_FLG_HT40_EN_MSK;
223
224 priv->stations[index].sta.station_flags = sta_flags;
225 done:
226 return;
227}
228
/**
 * iwl_legacy_prep_station - Prepare station information for addition
 *
 * Finds (or allocates) a station table slot for @addr, fills in the
 * REPLY_ADD_STA command template and marks the entry driver-active.
 * Returns the station id, or IWL_INVALID_STATION if the table is full.
 * If the entry is already being added or is fully active, returns the
 * existing id without touching it.
 *
 * should be called with sta_lock held
 */
u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
{
	struct iwl_station_entry *station;
	int i;
	u8 sta_id = IWL_INVALID_STATION;
	u16 rate;

	/* AP and broadcast have fixed, context-assigned slots */
	if (is_ap)
		sta_id = ctx->ap_sta_id;
	else if (is_broadcast_ether_addr(addr))
		sta_id = ctx->bcast_sta_id;
	else
		/* linear search: reuse a matching entry, else remember
		 * the first free slot */
		for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				sta_id = i;
				break;
			}

			if (!priv->stations[i].used &&
			    sta_id == IWL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IWL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv,
				"STA %d already in process of being added.\n",
				sta_id);
		return sta_id;
	}

	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
	    !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
		IWL_DEBUG_ASSOC(priv,
				"STA %d (%pM) already added, not adding again.\n",
				sta_id, addr);
		return sta_id;
	}

	station = &priv->stations[sta_id];
	station->used = IWL_STA_DRIVER_ACTIVE;
	IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
			sta_id, addr);
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = ctx->station_flags;
	station->ctxid = ctx->ctxid;

	if (sta) {
		struct iwl_station_priv_common *sta_priv;

		sta_priv = (void *)sta->drv_priv;
		sta_priv->ctx = ctx;
	}

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);

	/* 3945 only */
	rate = (priv->band == IEEE80211_BAND_5GHZ) ?
		IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;

}
EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
325
#define STA_WAIT_TIMEOUT (HZ/2)

/**
 * iwl_legacy_add_station_common - prepare a station and add it to the device
 * @sta_id_r: out parameter, receives the station id (0 on failure)
 *
 * Prepares the station entry under sta_lock, then sends REPLY_ADD_STA
 * synchronously (without the lock held — the command sleeps).  On send
 * failure the driver-active/in-progress flags are rolled back.
 *
 * Returns 0 on success, -EINVAL if no slot is available, -EEXIST if the
 * station is already added or being added, or the send error.
 */
int
iwl_legacy_add_station_common(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			   const u8 *addr, bool is_ap,
			   struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
			addr);
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv,
				"STA %d already in process of being added.\n",
			       sta_id);
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_ASSOC(priv,
				"STA %d (%pM) already added, not adding again.\n",
				sta_id, addr);
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EEXIST;
	}

	priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
	/* snapshot the command so we can drop the lock while sending */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
				sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&priv->sta_lock, flags_spin);
		IWL_ERR(priv, "Adding station %pM failed.\n",
			priv->stations[sta_id].sta.sta.addr);
		priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
		priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_add_station_common);
393
/**
 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
 *
 * Clears IWL_STA_UCODE_ACTIVE and wipes the whole station entry.
 * Logs an error if the entry was not in the expected
 * "uCode-active but not driver-active" state.
 *
 * priv->sta_lock must be held
 */
static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
{
	/* Ucode must be active and driver must be non active */
	if ((priv->stations[sta_id].used &
	     (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
	      IWL_STA_UCODE_ACTIVE)
		IWL_ERR(priv, "removed non active STA %u\n", sta_id);

	priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;

	memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
	IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
}
412
/*
 * iwl_legacy_send_remove_station - send REPLY_REMOVE_STA synchronously
 * @temporary: when false, also deactivate the driver's uCode bookkeeping
 *             for the station on success
 *
 * Returns 0 on success, -EIO on a failed command or bad status, or the
 * send error.
 */
static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
		       const u8 *addr, int sta_id,
		       bool temporary)
{
	struct iwl_rx_packet *pkt;
	int ret;

	unsigned long flags_spin;
	struct iwl_rem_sta_cmd rm_sta_cmd;

	struct iwl_host_cmd cmd = {
		.id = REPLY_REMOVE_STA,
		.len = sizeof(struct iwl_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	cmd.flags |= CMD_WANT_SKB;

	ret = iwl_legacy_send_cmd(priv, &cmd);

	if (ret)
		return ret;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
			  pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&priv->sta_lock, flags_spin);
				iwl_legacy_sta_ucode_deactivate(priv, sta_id);
				spin_unlock_irqrestore(&priv->sta_lock,
								flags_spin);
			}
			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
			break;
		}
	}
	iwl_legacy_free_pages(priv, cmd.reply_page);

	return ret;
}
469
/**
 * iwl_legacy_remove_station - Remove driver's knowledge of station.
 *
 * Validates the entry state under sta_lock (must be both driver- and
 * uCode-active), frees any local-station link-quality data, clears
 * driver-active, then sends REPLY_REMOVE_STA to the device.
 *
 * Returns 0 on success (including when the device is not ready, since
 * it is going down anyway), -EINVAL on inconsistent state, or the
 * remove-command error.
 */
int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
		       const u8 *addr)
{
	unsigned long flags;

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			"Unable to remove station %pM, device not ready.\n",
			addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
			sta_id, addr);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&priv->sta_lock, flags);

	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
				addr);
		goto out_err;
	}

	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
				addr);
		goto out_err;
	}

	/* local stations own a heap-allocated link quality command */
	if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
		kfree(priv->stations[sta_id].lq);
		priv->stations[sta_id].lq = NULL;
	}

	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;

	priv->num_stations--;

	BUG_ON(priv->num_stations < 0);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
529
530/**
531 * iwl_legacy_clear_ucode_stations - clear ucode station table bits
532 *
533 * This function clears all the bits in the driver indicating
534 * which stations are active in the ucode. Call when something
535 * other than explicit station management would cause this in
536 * the ucode, e.g. unassociated RXON.
537 */
538void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
539 struct iwl_rxon_context *ctx)
540{
541 int i;
542 unsigned long flags_spin;
543 bool cleared = false;
544
545 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
546
547 spin_lock_irqsave(&priv->sta_lock, flags_spin);
548 for (i = 0; i < priv->hw_params.max_stations; i++) {
549 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
550 continue;
551
552 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
553 IWL_DEBUG_INFO(priv,
554 "Clearing ucode active for station %d\n", i);
555 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
556 cleared = true;
557 }
558 }
559 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
560
561 if (!cleared)
562 IWL_DEBUG_INFO(priv,
563 "No active stations found to be cleared\n");
564}
565EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
566
/**
 * iwl_legacy_restore_stations() - Restore driver known stations to device
 *
 * Every station the driver considers active but the ucode does not
 * (e.g. after a firmware restart) is re-added to the device.
 *
 * Function sleeps.
 */
void
iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_addsta_cmd sta_cmd;
	struct iwl_link_quality_cmd lq;
	unsigned long flags_spin;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			"Not ready yet, not restoring any stations.\n");
		return;
	}

	IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	/* Pass 1: under the lock, mark every driver-active but
	 * ucode-inactive station of this context as "addition in
	 * progress". */
	for (i = 0; i < priv->hw_params.max_stations; i++) {
		if (ctx->ctxid != priv->stations[i].ctxid)
			continue;
		if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
			!(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
			IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
					priv->stations[i].sta.sta.addr);
			priv->stations[i].sta.mode = 0;
			priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/* Pass 2: send the sleeping host commands.  Stable copies of the
	 * ADD_STA and LQ commands are taken while the lock is held; the
	 * lock is then dropped around each synchronous command. */
	for (i = 0; i < priv->hw_params.max_stations; i++) {
		if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
			memcpy(&sta_cmd, &priv->stations[i].sta,
			       sizeof(struct iwl_legacy_addsta_cmd));
			send_lq = false;
			if (priv->stations[i].lq) {
				memcpy(&lq, priv->stations[i].lq,
				       sizeof(struct iwl_link_quality_cmd));
				send_lq = true;
			}
			spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
			ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
			if (ret) {
				/* Re-adding failed: drop the station from
				 * the driver's table as well. */
				spin_lock_irqsave(&priv->sta_lock, flags_spin);
				IWL_ERR(priv, "Adding station %pM failed.\n",
					priv->stations[i].sta.sta.addr);
				priv->stations[i].used &=
						~IWL_STA_DRIVER_ACTIVE;
				priv->stations[i].used &=
						~IWL_STA_UCODE_INPROGRESS;
				spin_unlock_irqrestore(&priv->sta_lock,
							flags_spin);
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command
			 */
			if (send_lq)
				iwl_legacy_send_lq_cmd(priv, ctx, &lq,
							CMD_SYNC, true);
			spin_lock_irqsave(&priv->sta_lock, flags_spin);
			priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
	if (!found)
		IWL_DEBUG_INFO(priv, "Restoring all known stations"
				" .... no stations to be restored.\n");
	else
		IWL_DEBUG_INFO(priv, "Restoring all known stations"
				" .... complete.\n");
}
EXPORT_SYMBOL(iwl_legacy_restore_stations);
651
652int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
653{
654 int i;
655
656 for (i = 0; i < priv->sta_key_max_num; i++)
657 if (!test_and_set_bit(i, &priv->ucode_key_table))
658 return i;
659
660 return WEP_INVALID_OFFSET;
661}
662EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
663
664void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
665{
666 unsigned long flags;
667 int i;
668
669 spin_lock_irqsave(&priv->sta_lock, flags);
670 for (i = 0; i < priv->hw_params.max_stations; i++) {
671 if (!(priv->stations[i].used & IWL_STA_BCAST))
672 continue;
673
674 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
675 priv->num_stations--;
676 BUG_ON(priv->num_stations < 0);
677 kfree(priv->stations[i].lq);
678 priv->stations[i].lq = NULL;
679 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags);
681}
682EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
683
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * Dump a link-quality command to the rate-scaling debug log: station id,
 * antenna masks, and the full retry-rate table.  Debug builds only.
 */
static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
			   struct iwl_link_quality_cmd *lq)
{
	int i;
	IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
	IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
			lq->general_params.single_stream_ant_msk,
			lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
			i, lq->rs_table[i].rate_n_flags);
}
#else
/* Non-debug builds: compiles away to nothing. */
static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
				  struct iwl_link_quality_cmd *lq)
{
}
#endif
704
705/**
706 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
707 *
708 * It sometimes happens when a HT rate has been in use and we
709 * loose connectivity with AP then mac80211 will first tell us that the
710 * current channel is not HT anymore before removing the station. In such a
711 * scenario the RXON flags will be updated to indicate we are not
712 * communicating HT anymore, but the LQ command may still contain HT rates.
713 * Test for this to prevent driver from sending LQ command between the time
714 * RXON flags are updated and when LQ command is updated.
715 */
716static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
717 struct iwl_rxon_context *ctx,
718 struct iwl_link_quality_cmd *lq)
719{
720 int i;
721
722 if (ctx->ht.enabled)
723 return true;
724
725 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
726 ctx->active.channel);
727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
729 RATE_MCS_HT_MSK) {
730 IWL_DEBUG_INFO(priv,
731 "index %d of LQ expects HT channel\n",
732 i);
733 return false;
734 }
735 }
736 return true;
737}
738
/**
 * iwl_legacy_send_lq_cmd() - Send link quality command
 * @priv: driver private data
 * @ctx: RXON context the station belongs to
 * @lq: link quality command; lq->sta_id must be a valid station index
 * @flags: CMD_SYNC or CMD_ASYNC
 * @init: This command is sent as part of station initialization right
 *	after station has been added.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set and we call a callback in
 * this case to clear the state indicating that station creation is in
 * progress.
 *
 * Returns 0 on success, -EINVAL for an invalid/inactive station or an
 * LQ table that is inconsistent with the current (non-HT) channel, or
 * the error from the host command.
 */
int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
	int ret = 0;
	unsigned long flags_spin;

	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_LINK_QUALITY_CMD,
		.len = sizeof(struct iwl_link_quality_cmd),
		.flags = flags,
		.data = lq,
	};

	if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
		return -EINVAL;


	/* Only send LQ for stations the driver still considers active. */
	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	iwl_legacy_dump_lq_cmd(priv, lq);
	/* An init-time LQ must be synchronous so the INPROGRESS bit
	 * below is cleared deterministically after the command. */
	BUG_ON(init && (cmd.flags & CMD_ASYNC));

	if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
		ret = iwl_legacy_send_cmd(priv, &cmd);
	else
		ret = -EINVAL;

	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		IWL_DEBUG_INFO(priv, "init LQ command complete,"
				" clearing sta addition status for sta %d\n",
			       lq->sta_id);
		spin_lock_irqsave(&priv->sta_lock, flags_spin);
		priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
	}
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
795
796int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
797 struct ieee80211_vif *vif,
798 struct ieee80211_sta *sta)
799{
800 struct iwl_priv *priv = hw->priv;
801 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
802 int ret;
803
804 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
805 sta->addr);
806 mutex_lock(&priv->mutex);
807 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
808 sta->addr);
809 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
810 if (ret)
811 IWL_ERR(priv, "Error removing station %pM\n",
812 sta->addr);
813 mutex_unlock(&priv->mutex);
814 return ret;
815}
816EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644
index 000000000000..67bd75fe01a1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.h
@@ -0,0 +1,148 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that in the case
80 * we're coming there from a hardware restart mac80211 will be
81 * able to reconfigure stations -- if we're getting there in the
82 * normal down flow then the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644
index 000000000000..a227773cb384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -0,0 +1,660 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
/**
 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
 * @priv: driver private data
 * @txq: queue whose write pointer should be published to the device
 *
 * No-op unless txq->need_update is set.  When the device may be asleep
 * (STATUS_POWER_PMI) the MAC is poked awake first; otherwise the write
 * pointer is written to HBUS_TARG_WRPTR directly.
 */
void
iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
			iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* need_update is deliberately left set here, so
			 * the write is retried on the next call. */
			return;
		}

		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
				txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
				txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
83
84/**
85 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
86 */
87void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
88{
89 struct iwl_tx_queue *txq = &priv->txq[txq_id];
90 struct iwl_queue *q = &txq->q;
91
92 if (q->n_bd == 0)
93 return;
94
95 while (q->write_ptr != q->read_ptr) {
96 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
97 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
98 }
99}
100EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
101
102/**
103 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
104 * @txq: Transmit queue to deallocate.
105 *
106 * Empty queue by removing and destroying all BD's.
107 * Free all buffers.
108 * 0-fill, but do not free "txq" descriptor structure.
109 */
110void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
111{
112 struct iwl_tx_queue *txq = &priv->txq[txq_id];
113 struct device *dev = &priv->pci_dev->dev;
114 int i;
115
116 iwl_legacy_tx_queue_unmap(priv, txq_id);
117
118 /* De-alloc array of command/tx buffers */
119 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
120 kfree(txq->cmd[i]);
121
122 /* De-alloc circular buffer of TFDs */
123 if (txq->q.n_bd)
124 dma_free_coherent(dev, priv->hw_params.tfd_size *
125 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
126
127 /* De-alloc array of per-TFD driver data */
128 kfree(txq->txb);
129 txq->txb = NULL;
130
131 /* deallocate arrays */
132 kfree(txq->cmd);
133 kfree(txq->meta);
134 txq->cmd = NULL;
135 txq->meta = NULL;
136
137 /* 0-fill queue descriptor structure */
138 memset(txq, 0, sizeof(*txq));
139}
140EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
141
/**
 * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 * @priv: driver private data
 *
 * Walks read_ptr..write_ptr of the command queue and unmaps each
 * in-flight command.  A huge command lives in the dedicated extra slot
 * at index q->n_window (see iwl_legacy_tx_queue_init), so it is
 * unmapped separately, once, at the end.
 */
void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	bool huge = false;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		/* we have no way to tell if it is a huge cmd ATM */
		i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_SIZE_HUGE)
			huge = true;
		else
			pci_unmap_single(priv->pci_dev,
					dma_unmap_addr(&txq->meta[i], mapping),
					dma_unmap_len(&txq->meta[i], len),
					PCI_DMA_BIDIRECTIONAL);

		q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	/* The huge command always occupies the slot just past the normal
	 * window, so one unmap covers it regardless of where in the ring
	 * it was queued. */
	if (huge) {
		i = q->n_window;
		pci_unmap_single(priv->pci_dev,
				dma_unmap_addr(&txq->meta[i], mapping),
				dma_unmap_len(&txq->meta[i], len),
				PCI_DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
179
/**
 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
 * @priv: driver private data; frees priv->txq[priv->cmd_queue]
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_legacy_cmd_queue_unmap(priv);

	/* De-alloc array of command/tx buffers.  Note "<=": the command
	 * queue is allocated with one extra slot for the huge (scan)
	 * command -- see iwl_legacy_tx_queue_init(). */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
215
216/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
217 * DMA services
218 *
219 * Theory of operation
220 *
221 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
222 * of buffer descriptors, each of which points to one or more data buffers for
223 * the device to read from or fill. Driver and device exchange status of each
224 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
225 * entries in each circular buffer, to protect against confusing empty and full
226 * queue states.
227 *
228 * The device reads or writes the data in the queues via the device's several
229 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
230 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
235 *
236 * See more detailed info in iwl-4965-hw.h.
237 ***************************************************/
238
/**
 * iwl_legacy_queue_space - return the number of free slots in @q
 * @q: Tx/cmd queue descriptor
 *
 * Keeps a reserve of 2 entries so a completely full ring can never be
 * confused with an empty one (read_ptr == write_ptr).
 *
 * NOTE(review): the wrap correction subtracts q->n_bd while the
 * negative-space correction adds q->n_window; for the command queue
 * these differ (n_bd = TFD_QUEUE_SIZE_MAX vs n_window = TFD_CMD_SLOTS,
 * per iwl_legacy_queue_init callers).  Looks suspicious -- confirm
 * against the queue indexing scheme before changing.
 */
int iwl_legacy_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_legacy_queue_space);
255
256
257/**
258 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
259 */
260static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
261 int count, int slots_num, u32 id)
262{
263 q->n_bd = count;
264 q->n_window = slots_num;
265 q->id = id;
266
267 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
268 * and iwl_legacy_queue_dec_wrap are broken. */
269 BUG_ON(!is_power_of_2(count));
270
271 /* slots_num must be power-of-two size, otherwise
272 * iwl_legacy_get_cmd_index is broken. */
273 BUG_ON(!is_power_of_2(slots_num));
274
275 q->low_mark = q->n_window / 4;
276 if (q->low_mark < 4)
277 q->low_mark = 4;
278
279 q->high_mark = q->n_window / 8;
280 if (q->high_mark < 2)
281 q->high_mark = 2;
282
283 q->write_ptr = q->read_ptr = 0;
284
285 return 0;
286}
287
288/**
289 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
290 */
291static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
292 struct iwl_tx_queue *txq, u32 id)
293{
294 struct device *dev = &priv->pci_dev->dev;
295 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
296
297 /* Driver private data, only for Tx (not command) queues,
298 * not shared with device. */
299 if (id != priv->cmd_queue) {
300 txq->txb = kzalloc(sizeof(txq->txb[0]) *
301 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
302 if (!txq->txb) {
303 IWL_ERR(priv, "kmalloc for auxiliary BD "
304 "structures failed\n");
305 goto error;
306 }
307 } else {
308 txq->txb = NULL;
309 }
310
311 /* Circular buffer of transmit frame descriptors (TFDs),
312 * shared with device */
313 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
314 GFP_KERNEL);
315 if (!txq->tfds) {
316 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
317 goto error;
318 }
319 txq->q.id = id;
320
321 return 0;
322
323 error:
324 kfree(txq->txb);
325 txq->txb = NULL;
326
327 return -ENOMEM;
328}
329
/**
 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
 * @priv: driver private data
 * @txq: queue structure to set up
 * @slots_num: number of command slots (must be a power of two)
 * @txq_id: queue number; the command queue gets one extra huge slot
 *
 * Returns 0 on success, -ENOMEM if any allocation fails; all partial
 * allocations are released before returning.
 */
int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4/#9), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == priv->cmd_queue)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue: the extra (last) slot
		 * gets the super-size buffer */
		if (i == slots_num)
			len = IWL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_legacy_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_legacy_queue_init(priv, &txq->q,
				TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	/* kfree(NULL) is a no-op, so sweeping the whole array is safe */
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
407
408void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
409 int slots_num, u32 txq_id)
410{
411 int actual_slots = slots_num;
412
413 if (txq_id == priv->cmd_queue)
414 actual_slots++;
415
416 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
417
418 txq->need_update = 0;
419
420 /* Initialize queue's high/low-water marks, and head/tail indexes */
421 iwl_legacy_queue_init(priv, &txq->q,
422 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
423
424 /* Tell device where to find queue */
425 priv->cfg->ops->lib->txq_init(priv, txq);
426}
427EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
428
429/*************** HOST COMMAND QUEUE FUNCTIONS *****/
430
/**
 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a point to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation is
 * failed. On success, it returns the index (> 0) of command in the
 * command queue.
 */
int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries
	 * Also, check to see if command buffer should not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	/* Async callers need one extra slot for their completion. */
	if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		IWL_ERR(priv, "Restarting adapter due to queue full\n");
		queue_work(priv->workqueue, &priv->restart);
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	/* If this is a huge cmd, mark the huge flag also on the meta.flags
	 * of the _original_ cmd. This is used for DMA mapping clean up.
	 */
	if (cmd->flags & CMD_SIZE_HUGE) {
		idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
		txq->meta[idx].flags = CMD_SIZE_HUGE;
	}

	/* For a huge command this resolves to the dedicated extra slot. */
	idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv,
				"Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Map the command for DMA and remember the mapping so the
	 * completion/unmap paths can undo it. */
	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
						fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
559
560/**
561 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
562 *
563 * When FW advances 'R' index, all entries between old and new 'R' index
564 * need to be reclaimed. As result, some free space forms. If there is
565 * enough free space (> low mark), wake the stack that feeds us.
566 */
567static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
568 int idx, int cmd_idx)
569{
570 struct iwl_tx_queue *txq = &priv->txq[txq_id];
571 struct iwl_queue *q = &txq->q;
572 int nfreed = 0;
573
574 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
575 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
576 "is out of range [0-%d] %d %d.\n", txq_id,
577 idx, q->n_bd, q->write_ptr, q->read_ptr);
578 return;
579 }
580
581 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
582 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
583
584 if (nfreed++ > 0) {
585 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
586 q->write_ptr, q->read_ptr);
587 queue_work(priv->workqueue, &priv->restart);
588 }
589
590 }
591}
592
/**
 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void
iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	/* Sequence number encodes the originating queue and slot index. */
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	/* Huge-frame bit tested on the raw (little-endian) sequence field. */
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, priv->cmd_queue, sequence,
		  priv->txq[priv->cmd_queue].q.read_ptr,
		  priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	/* If this is a huge cmd, clear the huge flag on the meta.flags
	 * of the _original_ cmd. So that iwl_legacy_cmd_queue_free won't unmap
	 * the DMA buffer for the scan (huge) command.
	 */
	if (huge) {
		cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
		txq->meta[cmd_index].flags = 0;
	}
	/* Re-resolve the index with the huge flag to locate this command. */
	cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	/* Release the DMA mapping recorded when the command was enqueued,
	 * before handing the buffer (or callback) back to the caller. */
	pci_unmap_single(priv->pci_dev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* Caller wants the response page: transfer ownership of the
		 * rx page to the original command and detach it from rxb. */
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	/* Free the command slot(s) up to and including this index. */
	iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		/* Synchronous command completed: wake any waiter blocked in
		 * the send path. */
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_legacy_get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
	/* Mark the meta entry free; must be last so no state is reused. */
	meta->flags = 0;
}
EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index adcef735180a..ab87e1b73529 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -61,7 +61,6 @@
61#include "iwl-helpers.h" 61#include "iwl-helpers.h"
62#include "iwl-dev.h" 62#include "iwl-dev.h"
63#include "iwl-spectrum.h" 63#include "iwl-spectrum.h"
64#include "iwl-legacy.h"
65 64
66/* 65/*
67 * module name, copyright, version, etc. 66 * module name, copyright, version, etc.
@@ -70,7 +69,7 @@
70#define DRV_DESCRIPTION \ 69#define DRV_DESCRIPTION \
71"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" 70"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
72 71
73#ifdef CONFIG_IWLWIFI_DEBUG 72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
74#define VD "d" 73#define VD "d"
75#else 74#else
76#define VD 75#define VD
@@ -82,7 +81,7 @@
82 * this was configurable. 81 * this was configurable.
83 */ 82 */
84#define DRV_VERSION IWLWIFI_VERSION VD "s" 83#define DRV_VERSION IWLWIFI_VERSION VD "s"
85#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" 84#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
86#define DRV_AUTHOR "<ilw@linux.intel.com>" 85#define DRV_AUTHOR "<ilw@linux.intel.com>"
87 86
88MODULE_DESCRIPTION(DRV_DESCRIPTION); 87MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -164,7 +163,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
164 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 163 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
165 == STA_KEY_FLG_NO_ENC) 164 == STA_KEY_FLG_NO_ENC)
166 priv->stations[sta_id].sta.key.key_offset = 165 priv->stations[sta_id].sta.key.key_offset =
167 iwl_get_free_ucode_key_index(priv); 166 iwl_legacy_get_free_ucode_key_index(priv);
168 /* else, we are overriding an existing key => no need to allocated room 167 /* else, we are overriding an existing key => no need to allocated room
169 * in uCode. */ 168 * in uCode. */
170 169
@@ -177,7 +176,8 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
177 176
178 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); 177 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
179 178
180 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 179 ret = iwl_legacy_send_add_sta(priv,
180 &priv->stations[sta_id].sta, CMD_ASYNC);
181 181
182 spin_unlock_irqrestore(&priv->sta_lock, flags); 182 spin_unlock_irqrestore(&priv->sta_lock, flags);
183 183
@@ -201,7 +201,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) 201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
202{ 202{
203 unsigned long flags; 203 unsigned long flags;
204 struct iwl_addsta_cmd sta_cmd; 204 struct iwl_legacy_addsta_cmd sta_cmd;
205 205
206 spin_lock_irqsave(&priv->sta_lock, flags); 206 spin_lock_irqsave(&priv->sta_lock, flags);
207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); 207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -210,11 +210,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
214 spin_unlock_irqrestore(&priv->sta_lock, flags); 214 spin_unlock_irqrestore(&priv->sta_lock, flags);
215 215
216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); 216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
217 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 217 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
218} 218}
219 219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv, 220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -318,7 +318,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
318 int left) 318 int left)
319{ 319{
320 320
321 if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) 321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0; 322 return 0;
323 323
324 if (priv->beacon_skb->len > left) 324 if (priv->beacon_skb->len > left)
@@ -344,12 +344,12 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
344 return -ENOMEM; 344 return -ENOMEM;
345 } 345 }
346 346
347 rate = iwl_rate_get_lowest_plcp(priv, 347 rate = iwl_legacy_get_lowest_plcp(priv,
348 &priv->contexts[IWL_RXON_CTX_BSS]); 348 &priv->contexts[IWL_RXON_CTX_BSS]);
349 349
350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); 350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
351 351
352 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 352 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
353 &frame->u.cmd[0]); 353 &frame->u.cmd[0]);
354 354
355 iwl3945_free_frame(priv, frame); 355 iwl3945_free_frame(priv, frame);
@@ -443,7 +443,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
444 } 444 }
445 445
446 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); 446 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
447 447
448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
449 if (ieee80211_is_mgmt(fc)) { 449 if (ieee80211_is_mgmt(fc)) {
@@ -485,7 +485,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
485 unsigned long flags; 485 unsigned long flags;
486 486
487 spin_lock_irqsave(&priv->lock, flags); 487 spin_lock_irqsave(&priv->lock, flags);
488 if (iwl_is_rfkill(priv)) { 488 if (iwl_legacy_is_rfkill(priv)) {
489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
490 goto drop_unlock; 490 goto drop_unlock;
491 } 491 }
@@ -500,7 +500,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
500 500
501 fc = hdr->frame_control; 501 fc = hdr->frame_control;
502 502
503#ifdef CONFIG_IWLWIFI_DEBUG 503#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
504 if (ieee80211_is_auth(fc)) 504 if (ieee80211_is_auth(fc))
505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); 505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
506 else if (ieee80211_is_assoc_req(fc)) 506 else if (ieee80211_is_assoc_req(fc))
@@ -514,7 +514,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
514 hdr_len = ieee80211_hdrlen(fc); 514 hdr_len = ieee80211_hdrlen(fc);
515 515
516 /* Find index into station table for destination station */ 516 /* Find index into station table for destination station */
517 sta_id = iwl_sta_id_or_broadcast( 517 sta_id = iwl_legacy_sta_id_or_broadcast(
518 priv, &priv->contexts[IWL_RXON_CTX_BSS], 518 priv, &priv->contexts[IWL_RXON_CTX_BSS],
519 info->control.sta); 519 info->control.sta);
520 if (sta_id == IWL_INVALID_STATION) { 520 if (sta_id == IWL_INVALID_STATION) {
@@ -536,12 +536,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
536 txq = &priv->txq[txq_id]; 536 txq = &priv->txq[txq_id];
537 q = &txq->q; 537 q = &txq->q;
538 538
539 if ((iwl_queue_space(q) < q->high_mark)) 539 if ((iwl_legacy_queue_space(q) < q->high_mark))
540 goto drop; 540 goto drop;
541 541
542 spin_lock_irqsave(&priv->lock, flags); 542 spin_lock_irqsave(&priv->lock, flags);
543 543
544 idx = get_cmd_index(q, q->write_ptr, 0); 544 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
545 545
546 /* Set up driver data for this TFD */ 546 /* Set up driver data for this TFD */
547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -582,8 +582,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
582 len = (u16)skb->len; 582 len = (u16)skb->len;
583 tx_cmd->len = cpu_to_le16(len); 583 tx_cmd->len = cpu_to_le16(len);
584 584
585 iwl_dbg_log_tx_data_frame(priv, len, hdr); 585 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
586 iwl_update_stats(priv, true, fc, len); 586 iwl_legacy_update_stats(priv, true, fc, len);
587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
589 589
@@ -642,20 +642,20 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
642 642
643 643
644 /* Tell device the write index *just past* this latest filled TFD */ 644 /* Tell device the write index *just past* this latest filled TFD */
645 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 645 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
646 iwl_txq_update_write_ptr(priv, txq); 646 iwl_legacy_txq_update_write_ptr(priv, txq);
647 spin_unlock_irqrestore(&priv->lock, flags); 647 spin_unlock_irqrestore(&priv->lock, flags);
648 648
649 if ((iwl_queue_space(q) < q->high_mark) 649 if ((iwl_legacy_queue_space(q) < q->high_mark)
650 && priv->mac80211_registered) { 650 && priv->mac80211_registered) {
651 if (wait_write_ptr) { 651 if (wait_write_ptr) {
652 spin_lock_irqsave(&priv->lock, flags); 652 spin_lock_irqsave(&priv->lock, flags);
653 txq->need_update = 1; 653 txq->need_update = 1;
654 iwl_txq_update_write_ptr(priv, txq); 654 iwl_legacy_txq_update_write_ptr(priv, txq);
655 spin_unlock_irqrestore(&priv->lock, flags); 655 spin_unlock_irqrestore(&priv->lock, flags);
656 } 656 }
657 657
658 iwl_stop_queue(priv, txq); 658 iwl_legacy_stop_queue(priv, txq);
659 } 659 }
660 660
661 return 0; 661 return 0;
@@ -683,8 +683,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
683 int duration = le16_to_cpu(params->duration); 683 int duration = le16_to_cpu(params->duration);
684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
685 685
686 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) 686 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
687 add_time = iwl_usecs_to_beacons(priv, 687 add_time = iwl_legacy_usecs_to_beacons(priv,
688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf, 688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
689 le16_to_cpu(ctx->timing.beacon_interval)); 689 le16_to_cpu(ctx->timing.beacon_interval));
690 690
@@ -697,9 +697,9 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
697 cmd.len = sizeof(spectrum); 697 cmd.len = sizeof(spectrum);
698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
699 699
700 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) 700 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
701 spectrum.start_time = 701 spectrum.start_time =
702 iwl_add_beacon_time(priv, 702 iwl_legacy_add_beacon_time(priv,
703 priv->_3945.last_beacon_time, add_time, 703 priv->_3945.last_beacon_time, add_time,
704 le16_to_cpu(ctx->timing.beacon_interval)); 704 le16_to_cpu(ctx->timing.beacon_interval));
705 else 705 else
@@ -712,7 +712,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
712 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 712 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
714 714
715 rc = iwl_send_cmd_sync(priv, &cmd); 715 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
716 if (rc) 716 if (rc)
717 return rc; 717 return rc;
718 718
@@ -739,7 +739,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
739 break; 739 break;
740 } 740 }
741 741
742 iwl_free_pages(priv, cmd.reply_page); 742 iwl_legacy_free_pages(priv, cmd.reply_page);
743 743
744 return rc; 744 return rc;
745} 745}
@@ -783,45 +783,19 @@ static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, 783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
784 struct iwl_rx_mem_buffer *rxb) 784 struct iwl_rx_mem_buffer *rxb)
785{ 785{
786#ifdef CONFIG_IWLWIFI_DEBUG 786#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
787 struct iwl_rx_packet *pkt = rxb_addr(rxb); 787 struct iwl_rx_packet *pkt = rxb_addr(rxb);
788#endif 788#endif
789 789
790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
791} 791}
792 792
793static void iwl3945_bg_beacon_update(struct work_struct *work)
794{
795 struct iwl_priv *priv =
796 container_of(work, struct iwl_priv, beacon_update);
797 struct sk_buff *beacon;
798
799 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
800 beacon = ieee80211_beacon_get(priv->hw,
801 priv->contexts[IWL_RXON_CTX_BSS].vif);
802
803 if (!beacon) {
804 IWL_ERR(priv, "update beacon failed\n");
805 return;
806 }
807
808 mutex_lock(&priv->mutex);
809 /* new beacon skb is allocated every time; dispose previous.*/
810 if (priv->beacon_skb)
811 dev_kfree_skb(priv->beacon_skb);
812
813 priv->beacon_skb = beacon;
814 mutex_unlock(&priv->mutex);
815
816 iwl3945_send_beacon_cmd(priv);
817}
818
819static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, 793static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
820 struct iwl_rx_mem_buffer *rxb) 794 struct iwl_rx_mem_buffer *rxb)
821{ 795{
822 struct iwl_rx_packet *pkt = rxb_addr(rxb); 796 struct iwl_rx_packet *pkt = rxb_addr(rxb);
823 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 797 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
824#ifdef CONFIG_IWLWIFI_DEBUG 798#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
825 u8 rate = beacon->beacon_notify_hdr.rate; 799 u8 rate = beacon->beacon_notify_hdr.rate;
826 800
827 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 801 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -835,9 +809,6 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
835 809
836 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 810 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
837 811
838 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
839 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
840 queue_work(priv->workqueue, &priv->beacon_update);
841} 812}
842 813
843/* Handle notification from uCode that card's power state is changing 814/* Handle notification from uCode that card's power state is changing
@@ -862,7 +833,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
862 clear_bit(STATUS_RF_KILL_HW, &priv->status); 833 clear_bit(STATUS_RF_KILL_HW, &priv->status);
863 834
864 835
865 iwl_scan_cancel(priv); 836 iwl_legacy_scan_cancel(priv);
866 837
867 if ((test_bit(STATUS_RF_KILL_HW, &status) != 838 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
868 test_bit(STATUS_RF_KILL_HW, &priv->status))) 839 test_bit(STATUS_RF_KILL_HW, &priv->status)))
@@ -885,13 +856,13 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
885{ 856{
886 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; 857 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
887 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; 858 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
888 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 859 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
889 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 860 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
890 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 861 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
891 iwl_rx_spectrum_measure_notif; 862 iwl_legacy_rx_spectrum_measure_notif;
892 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 863 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
893 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 864 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
894 iwl_rx_pm_debug_statistics_notif; 865 iwl_legacy_rx_pm_debug_statistics_notif;
895 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; 866 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
896 867
897 /* 868 /*
@@ -902,7 +873,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
902 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; 873 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
903 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 874 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
904 875
905 iwl_setup_rx_scan_handlers(priv); 876 iwl_legacy_setup_rx_scan_handlers(priv);
906 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; 877 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
907 878
908 /* Set up hardware specific Rx handlers */ 879 /* Set up hardware specific Rx handlers */
@@ -1003,7 +974,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1003 974
1004 spin_lock_irqsave(&rxq->lock, flags); 975 spin_lock_irqsave(&rxq->lock, flags);
1005 write = rxq->write & ~0x7; 976 write = rxq->write & ~0x7;
1006 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 977 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
1007 /* Get next free Rx buffer, remove from free list */ 978 /* Get next free Rx buffer, remove from free list */
1008 element = rxq->rx_free.next; 979 element = rxq->rx_free.next;
1009 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 980 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
@@ -1029,7 +1000,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1029 spin_lock_irqsave(&rxq->lock, flags); 1000 spin_lock_irqsave(&rxq->lock, flags);
1030 rxq->need_update = 1; 1001 rxq->need_update = 1;
1031 spin_unlock_irqrestore(&rxq->lock, flags); 1002 spin_unlock_irqrestore(&rxq->lock, flags);
1032 iwl_rx_queue_update_write_ptr(priv, rxq); 1003 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1033 } 1004 }
1034} 1005}
1035 1006
@@ -1123,7 +1094,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1123 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1094 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1124 PAGE_SIZE << priv->hw_params.rx_page_order, 1095 PAGE_SIZE << priv->hw_params.rx_page_order,
1125 PCI_DMA_FROMDEVICE); 1096 PCI_DMA_FROMDEVICE);
1126 __iwl_free_pages(priv, rxq->pool[i].page); 1097 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1127 rxq->pool[i].page = NULL; 1098 rxq->pool[i].page = NULL;
1128 } 1099 }
1129 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1100 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1170,7 +1141,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1170 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1141 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1171 PAGE_SIZE << priv->hw_params.rx_page_order, 1142 PAGE_SIZE << priv->hw_params.rx_page_order,
1172 PCI_DMA_FROMDEVICE); 1143 PCI_DMA_FROMDEVICE);
1173 __iwl_free_pages(priv, rxq->pool[i].page); 1144 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1174 rxq->pool[i].page = NULL; 1145 rxq->pool[i].page = NULL;
1175 } 1146 }
1176 } 1147 }
@@ -1275,7 +1246,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1275 1246
1276 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 1247 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1277 len += sizeof(u32); /* account for status word */ 1248 len += sizeof(u32); /* account for status word */
1278 trace_iwlwifi_dev_rx(priv, pkt, len); 1249 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1279 1250
1280 /* Reclaim a command buffer only if this packet is a response 1251 /* Reclaim a command buffer only if this packet is a response
1281 * to a (driver-originated) command. 1252 * to a (driver-originated) command.
@@ -1292,14 +1263,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1292 * rx_handlers table. See iwl3945_setup_rx_handlers() */ 1263 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1293 if (priv->rx_handlers[pkt->hdr.cmd]) { 1264 if (priv->rx_handlers[pkt->hdr.cmd]) {
1294 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1265 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1295 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1266 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1296 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1267 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1297 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 1268 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1298 } else { 1269 } else {
1299 /* No handling needed */ 1270 /* No handling needed */
1300 IWL_DEBUG_RX(priv, 1271 IWL_DEBUG_RX(priv,
1301 "r %d i %d No handler needed for %s, 0x%02x\n", 1272 "r %d i %d No handler needed for %s, 0x%02x\n",
1302 r, i, get_cmd_string(pkt->hdr.cmd), 1273 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1303 pkt->hdr.cmd); 1274 pkt->hdr.cmd);
1304 } 1275 }
1305 1276
@@ -1312,10 +1283,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1312 1283
1313 if (reclaim) { 1284 if (reclaim) {
1314 /* Invoke any callbacks, transfer the buffer to caller, 1285 /* Invoke any callbacks, transfer the buffer to caller,
1315 * and fire off the (possibly) blocking iwl_send_cmd() 1286 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1316 * as we reclaim the driver command queue */ 1287 * as we reclaim the driver command queue */
1317 if (rxb->page) 1288 if (rxb->page)
1318 iwl_tx_cmd_complete(priv, rxb); 1289 iwl_legacy_tx_cmd_complete(priv, rxb);
1319 else 1290 else
1320 IWL_WARN(priv, "Claim null rxb?\n"); 1291 IWL_WARN(priv, "Claim null rxb?\n");
1321 } 1292 }
@@ -1357,14 +1328,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1357} 1328}
1358 1329
1359/* call this function to flush any scheduled tasklet */ 1330/* call this function to flush any scheduled tasklet */
1360static inline void iwl_synchronize_irq(struct iwl_priv *priv) 1331static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
1361{ 1332{
1362 /* wait to make sure we flush pending tasklet*/ 1333 /* wait to make sure we flush pending tasklet*/
1363 synchronize_irq(priv->pci_dev->irq); 1334 synchronize_irq(priv->pci_dev->irq);
1364 tasklet_kill(&priv->irq_tasklet); 1335 tasklet_kill(&priv->irq_tasklet);
1365} 1336}
1366 1337
1367static const char *desc_lookup(int i) 1338static const char *iwl3945_desc_lookup(int i)
1368{ 1339{
1369 switch (i) { 1340 switch (i) {
1370 case 1: 1341 case 1:
@@ -1401,7 +1372,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1401 } 1372 }
1402 1373
1403 1374
1404 count = iwl_read_targ_mem(priv, base); 1375 count = iwl_legacy_read_targ_mem(priv, base);
1405 1376
1406 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1377 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1407 IWL_ERR(priv, "Start IWL Error Log Dump:\n"); 1378 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
@@ -1414,25 +1385,25 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1414 for (i = ERROR_START_OFFSET; 1385 for (i = ERROR_START_OFFSET;
1415 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; 1386 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1416 i += ERROR_ELEM_SIZE) { 1387 i += ERROR_ELEM_SIZE) {
1417 desc = iwl_read_targ_mem(priv, base + i); 1388 desc = iwl_legacy_read_targ_mem(priv, base + i);
1418 time = 1389 time =
1419 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32)); 1390 iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1420 blink1 = 1391 blink1 =
1421 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32)); 1392 iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1422 blink2 = 1393 blink2 =
1423 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32)); 1394 iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1424 ilink1 = 1395 ilink1 =
1425 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32)); 1396 iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1426 ilink2 = 1397 ilink2 =
1427 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32)); 1398 iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1428 data1 = 1399 data1 =
1429 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); 1400 iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1430 1401
1431 IWL_ERR(priv, 1402 IWL_ERR(priv,
1432 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1403 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1433 desc_lookup(desc), desc, time, blink1, blink2, 1404 iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
1434 ilink1, ilink2, data1); 1405 ilink1, ilink2, data1);
1435 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, 1406 trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
1436 0, blink1, blink2, ilink1, ilink2); 1407 0, blink1, blink2, ilink1, ilink2);
1437 } 1408 }
1438} 1409}
@@ -1471,14 +1442,14 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1471 iwl_grab_nic_access(priv); 1442 iwl_grab_nic_access(priv);
1472 1443
1473 /* Set starting address; reads will auto-increment */ 1444 /* Set starting address; reads will auto-increment */
1474 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); 1445 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1475 rmb(); 1446 rmb();
1476 1447
1477 /* "time" is actually "data" for mode 0 (no timestamp). 1448 /* "time" is actually "data" for mode 0 (no timestamp).
1478 * place event id # at far right for easier visual parsing. */ 1449 * place event id # at far right for easier visual parsing. */
1479 for (i = 0; i < num_events; i++) { 1450 for (i = 0; i < num_events; i++) {
1480 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1451 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1481 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1452 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1482 if (mode == 0) { 1453 if (mode == 0) {
1483 /* data, ev */ 1454 /* data, ev */
1484 if (bufsz) { 1455 if (bufsz) {
@@ -1487,11 +1458,12 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1487 time, ev); 1458 time, ev);
1488 } else { 1459 } else {
1489 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1460 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1490 trace_iwlwifi_dev_ucode_event(priv, 0, 1461 trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
1491 time, ev); 1462 time, ev);
1492 } 1463 }
1493 } else { 1464 } else {
1494 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1465 data = _iwl_legacy_read_direct32(priv,
1466 HBUS_TARG_MEM_RDAT);
1495 if (bufsz) { 1467 if (bufsz) {
1496 pos += scnprintf(*buf + pos, bufsz - pos, 1468 pos += scnprintf(*buf + pos, bufsz - pos,
1497 "%010u:0x%08x:%04u\n", 1469 "%010u:0x%08x:%04u\n",
@@ -1499,7 +1471,7 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1499 } else { 1471 } else {
1500 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", 1472 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1501 time, data, ev); 1473 time, data, ev);
1502 trace_iwlwifi_dev_ucode_event(priv, time, 1474 trace_iwlwifi_legacy_dev_ucode_event(priv, time,
1503 data, ev); 1475 data, ev);
1504 } 1476 }
1505 } 1477 }
@@ -1570,10 +1542,10 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1570 } 1542 }
1571 1543
1572 /* event log header */ 1544 /* event log header */
1573 capacity = iwl_read_targ_mem(priv, base); 1545 capacity = iwl_legacy_read_targ_mem(priv, base);
1574 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); 1546 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
1575 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1547 num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
1576 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1548 next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
1577 1549
1578 if (capacity > priv->cfg->base_params->max_event_log_size) { 1550 if (capacity > priv->cfg->base_params->max_event_log_size) {
1579 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 1551 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
@@ -1595,8 +1567,8 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1595 return pos; 1567 return pos;
1596 } 1568 }
1597 1569
1598#ifdef CONFIG_IWLWIFI_DEBUG 1570#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1599 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) 1571 if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1600 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) 1572 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1601 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; 1573 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1602#else 1574#else
@@ -1607,7 +1579,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1607 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", 1579 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
1608 size); 1580 size);
1609 1581
1610#ifdef CONFIG_IWLWIFI_DEBUG 1582#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1611 if (display) { 1583 if (display) {
1612 if (full_log) 1584 if (full_log)
1613 bufsz = capacity * 48; 1585 bufsz = capacity * 48;
@@ -1617,7 +1589,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1617 if (!*buf) 1589 if (!*buf)
1618 return -ENOMEM; 1590 return -ENOMEM;
1619 } 1591 }
1620 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 1592 if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1621 /* if uCode has wrapped back to top of log, 1593 /* if uCode has wrapped back to top of log,
1622 * start at the oldest entry, 1594 * start at the oldest entry,
1623 * i.e the next one that uCode would fill. 1595 * i.e the next one that uCode would fill.
@@ -1647,7 +1619,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1647 u32 inta, handled = 0; 1619 u32 inta, handled = 0;
1648 u32 inta_fh; 1620 u32 inta_fh;
1649 unsigned long flags; 1621 unsigned long flags;
1650#ifdef CONFIG_IWLWIFI_DEBUG 1622#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1651 u32 inta_mask; 1623 u32 inta_mask;
1652#endif 1624#endif
1653 1625
@@ -1665,8 +1637,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1665 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1637 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1666 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 1638 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1667 1639
1668#ifdef CONFIG_IWLWIFI_DEBUG 1640#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1669 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1641 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
1670 /* just for debug */ 1642 /* just for debug */
1671 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1643 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1672 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1644 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -1690,18 +1662,18 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1690 IWL_ERR(priv, "Hardware error detected. Restarting.\n"); 1662 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
1691 1663
1692 /* Tell the device to stop sending interrupts */ 1664 /* Tell the device to stop sending interrupts */
1693 iwl_disable_interrupts(priv); 1665 iwl_legacy_disable_interrupts(priv);
1694 1666
1695 priv->isr_stats.hw++; 1667 priv->isr_stats.hw++;
1696 iwl_irq_handle_error(priv); 1668 iwl_legacy_irq_handle_error(priv);
1697 1669
1698 handled |= CSR_INT_BIT_HW_ERR; 1670 handled |= CSR_INT_BIT_HW_ERR;
1699 1671
1700 return; 1672 return;
1701 } 1673 }
1702 1674
1703#ifdef CONFIG_IWLWIFI_DEBUG 1675#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1704 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1676 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1705 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1677 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1706 if (inta & CSR_INT_BIT_SCD) { 1678 if (inta & CSR_INT_BIT_SCD) {
1707 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " 1679 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
@@ -1724,20 +1696,20 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1724 IWL_ERR(priv, "Microcode SW error detected. " 1696 IWL_ERR(priv, "Microcode SW error detected. "
1725 "Restarting 0x%X.\n", inta); 1697 "Restarting 0x%X.\n", inta);
1726 priv->isr_stats.sw++; 1698 priv->isr_stats.sw++;
1727 iwl_irq_handle_error(priv); 1699 iwl_legacy_irq_handle_error(priv);
1728 handled |= CSR_INT_BIT_SW_ERR; 1700 handled |= CSR_INT_BIT_SW_ERR;
1729 } 1701 }
1730 1702
1731 /* uCode wakes up after power-down sleep */ 1703 /* uCode wakes up after power-down sleep */
1732 if (inta & CSR_INT_BIT_WAKEUP) { 1704 if (inta & CSR_INT_BIT_WAKEUP) {
1733 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1705 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1734 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1706 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1735 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1707 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
1736 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1708 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
1737 iwl_txq_update_write_ptr(priv, &priv->txq[2]); 1709 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
1738 iwl_txq_update_write_ptr(priv, &priv->txq[3]); 1710 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
1739 iwl_txq_update_write_ptr(priv, &priv->txq[4]); 1711 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
1740 iwl_txq_update_write_ptr(priv, &priv->txq[5]); 1712 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
1741 1713
1742 priv->isr_stats.wakeup++; 1714 priv->isr_stats.wakeup++;
1743 handled |= CSR_INT_BIT_WAKEUP; 1715 handled |= CSR_INT_BIT_WAKEUP;
@@ -1757,7 +1729,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1757 priv->isr_stats.tx++; 1729 priv->isr_stats.tx++;
1758 1730
1759 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); 1731 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
1760 iwl_write_direct32(priv, FH39_TCSR_CREDIT 1732 iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
1761 (FH39_SRVC_CHNL), 0x0); 1733 (FH39_SRVC_CHNL), 0x0);
1762 handled |= CSR_INT_BIT_FH_TX; 1734 handled |= CSR_INT_BIT_FH_TX;
1763 } 1735 }
@@ -1776,10 +1748,10 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1776 /* Re-enable all interrupts */ 1748 /* Re-enable all interrupts */
1777 /* only Re-enable if disabled by irq */ 1749 /* only Re-enable if disabled by irq */
1778 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1750 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1779 iwl_enable_interrupts(priv); 1751 iwl_legacy_enable_interrupts(priv);
1780 1752
1781#ifdef CONFIG_IWLWIFI_DEBUG 1753#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1782 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1754 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1783 inta = iwl_read32(priv, CSR_INT); 1755 inta = iwl_read32(priv, CSR_INT);
1784 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1756 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1785 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1757 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -1806,14 +1778,14 @@ static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
1806 return added; 1778 return added;
1807 } 1779 }
1808 1780
1809 active_dwell = iwl_get_active_dwell_time(priv, band, 0); 1781 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
1810 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1782 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1811 1783
1812 if (passive_dwell <= active_dwell) 1784 if (passive_dwell <= active_dwell)
1813 passive_dwell = active_dwell + 1; 1785 passive_dwell = active_dwell + 1;
1814 1786
1815 1787
1816 channel = iwl_get_single_channel_number(priv, band); 1788 channel = iwl_legacy_get_single_channel_number(priv, band);
1817 1789
1818 if (channel) { 1790 if (channel) {
1819 scan_ch->channel = channel; 1791 scan_ch->channel = channel;
@@ -1849,8 +1821,8 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1849 if (!sband) 1821 if (!sband)
1850 return 0; 1822 return 0;
1851 1823
1852 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); 1824 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
1853 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1825 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1854 1826
1855 if (passive_dwell <= active_dwell) 1827 if (passive_dwell <= active_dwell)
1856 passive_dwell = active_dwell + 1; 1828 passive_dwell = active_dwell + 1;
@@ -1863,10 +1835,12 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1863 1835
1864 scan_ch->channel = chan->hw_value; 1836 scan_ch->channel = chan->hw_value;
1865 1837
1866 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); 1838 ch_info = iwl_legacy_get_channel_info(priv, band,
1867 if (!is_channel_valid(ch_info)) { 1839 scan_ch->channel);
1868 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", 1840 if (!iwl_legacy_is_channel_valid(ch_info)) {
1869 scan_ch->channel); 1841 IWL_DEBUG_SCAN(priv,
1842 "Channel %d is INVALID for this band.\n",
1843 scan_ch->channel);
1870 continue; 1844 continue;
1871 } 1845 }
1872 1846
@@ -1875,7 +1849,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1875 /* If passive , set up for auto-switch 1849 /* If passive , set up for auto-switch
1876 * and use long active_dwell time. 1850 * and use long active_dwell time.
1877 */ 1851 */
1878 if (!is_active || is_channel_passive(ch_info) || 1852 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
1879 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { 1853 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1880 scan_ch->type = 0; /* passive */ 1854 scan_ch->type = 0; /* passive */
1881 if (IWL_UCODE_API(priv->ucode_ver) == 1) 1855 if (IWL_UCODE_API(priv->ucode_ver) == 1)
@@ -1955,12 +1929,12 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1955 1929
1956static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) 1930static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
1957{ 1931{
1958 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 1932 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1959 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 1933 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1960 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 1934 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1961 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); 1935 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1962 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); 1936 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1963 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 1937 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1964} 1938}
1965 1939
1966/** 1940/**
@@ -1976,7 +1950,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
1976 1950
1977 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); 1951 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1978 1952
1979 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 1953 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1980 IWL39_RTC_INST_LOWER_BOUND); 1954 IWL39_RTC_INST_LOWER_BOUND);
1981 1955
1982 errcnt = 0; 1956 errcnt = 0;
@@ -1984,7 +1958,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
1984 /* read data comes through single port, auto-incr addr */ 1958 /* read data comes through single port, auto-incr addr */
1985 /* NOTE: Use the debugless read so we don't flood kernel log 1959 /* NOTE: Use the debugless read so we don't flood kernel log
1986 * if IWL_DL_IO is set */ 1960 * if IWL_DL_IO is set */
1987 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1961 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1988 if (val != le32_to_cpu(*image)) { 1962 if (val != le32_to_cpu(*image)) {
1989 IWL_ERR(priv, "uCode INST section is invalid at " 1963 IWL_ERR(priv, "uCode INST section is invalid at "
1990 "offset 0x%x, is 0x%x, s/b 0x%x\n", 1964 "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -2023,9 +1997,9 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
2023 /* read data comes through single port, auto-incr addr */ 1997 /* read data comes through single port, auto-incr addr */
2024 /* NOTE: Use the debugless read so we don't flood kernel log 1998 /* NOTE: Use the debugless read so we don't flood kernel log
2025 * if IWL_DL_IO is set */ 1999 * if IWL_DL_IO is set */
2026 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 2000 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2027 i + IWL39_RTC_INST_LOWER_BOUND); 2001 i + IWL39_RTC_INST_LOWER_BOUND);
2028 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2002 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
2029 if (val != le32_to_cpu(*image)) { 2003 if (val != le32_to_cpu(*image)) {
2030#if 0 /* Enable this if you want to see details */ 2004#if 0 /* Enable this if you want to see details */
2031 IWL_ERR(priv, "uCode INST section is invalid at " 2005 IWL_ERR(priv, "uCode INST section is invalid at "
@@ -2101,7 +2075,7 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
2101#define IWL3945_UCODE_GET(item) \ 2075#define IWL3945_UCODE_GET(item) \
2102static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ 2076static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
2103{ \ 2077{ \
2104 return le32_to_cpu(ucode->u.v1.item); \ 2078 return le32_to_cpu(ucode->v1.item); \
2105} 2079}
2106 2080
2107static u32 iwl3945_ucode_get_header_size(u32 api_ver) 2081static u32 iwl3945_ucode_get_header_size(u32 api_ver)
@@ -2111,7 +2085,7 @@ static u32 iwl3945_ucode_get_header_size(u32 api_ver)
2111 2085
2112static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) 2086static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
2113{ 2087{
2114 return (u8 *) ucode->u.v1.data; 2088 return (u8 *) ucode->v1.data;
2115} 2089}
2116 2090
2117IWL3945_UCODE_GET(inst_size); 2091IWL3945_UCODE_GET(inst_size);
@@ -2286,13 +2260,13 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2286 * 1) unmodified from disk 2260 * 1) unmodified from disk
2287 * 2) backup cache for save/restore during power-downs */ 2261 * 2) backup cache for save/restore during power-downs */
2288 priv->ucode_code.len = inst_size; 2262 priv->ucode_code.len = inst_size;
2289 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); 2263 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2290 2264
2291 priv->ucode_data.len = data_size; 2265 priv->ucode_data.len = data_size;
2292 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); 2266 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2293 2267
2294 priv->ucode_data_backup.len = data_size; 2268 priv->ucode_data_backup.len = data_size;
2295 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 2269 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2296 2270
2297 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || 2271 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2298 !priv->ucode_data_backup.v_addr) 2272 !priv->ucode_data_backup.v_addr)
@@ -2301,10 +2275,10 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2301 /* Initialization instructions and data */ 2275 /* Initialization instructions and data */
2302 if (init_size && init_data_size) { 2276 if (init_size && init_data_size) {
2303 priv->ucode_init.len = init_size; 2277 priv->ucode_init.len = init_size;
2304 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); 2278 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2305 2279
2306 priv->ucode_init_data.len = init_data_size; 2280 priv->ucode_init_data.len = init_data_size;
2307 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); 2281 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2308 2282
2309 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) 2283 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2310 goto err_pci_alloc; 2284 goto err_pci_alloc;
@@ -2313,7 +2287,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2313 /* Bootstrap (instructions only, no data) */ 2287 /* Bootstrap (instructions only, no data) */
2314 if (boot_size) { 2288 if (boot_size) {
2315 priv->ucode_boot.len = boot_size; 2289 priv->ucode_boot.len = boot_size;
2316 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); 2290 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2317 2291
2318 if (!priv->ucode_boot.v_addr) 2292 if (!priv->ucode_boot.v_addr)
2319 goto err_pci_alloc; 2293 goto err_pci_alloc;
@@ -2400,14 +2374,14 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2400 pdata = priv->ucode_data_backup.p_addr; 2374 pdata = priv->ucode_data_backup.p_addr;
2401 2375
2402 /* Tell bootstrap uCode where to find image to load */ 2376 /* Tell bootstrap uCode where to find image to load */
2403 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 2377 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2404 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 2378 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2405 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 2379 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2406 priv->ucode_data.len); 2380 priv->ucode_data.len);
2407 2381
2408 /* Inst byte count must be last to set up, bit 31 signals uCode 2382 /* Inst byte count must be last to set up, bit 31 signals uCode
2409 * that all new ptr/size info is in place */ 2383 * that all new ptr/size info is in place */
2410 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 2384 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2411 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 2385 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2412 2386
2413 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); 2387 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
@@ -2488,7 +2462,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2488 goto restart; 2462 goto restart;
2489 } 2463 }
2490 2464
2491 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); 2465 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2492 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); 2466 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2493 2467
2494 if (rfkill & 0x1) { 2468 if (rfkill & 0x1) {
@@ -2510,18 +2484,18 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2510 set_bit(STATUS_ALIVE, &priv->status); 2484 set_bit(STATUS_ALIVE, &priv->status);
2511 2485
2512 /* Enable watchdog to monitor the driver tx queues */ 2486 /* Enable watchdog to monitor the driver tx queues */
2513 iwl_setup_watchdog(priv); 2487 iwl_legacy_setup_watchdog(priv);
2514 2488
2515 if (iwl_is_rfkill(priv)) 2489 if (iwl_legacy_is_rfkill(priv))
2516 return; 2490 return;
2517 2491
2518 ieee80211_wake_queues(priv->hw); 2492 ieee80211_wake_queues(priv->hw);
2519 2493
2520 priv->active_rate = IWL_RATES_MASK_3945; 2494 priv->active_rate = IWL_RATES_MASK_3945;
2521 2495
2522 iwl_power_update_mode(priv, true); 2496 iwl_legacy_power_update_mode(priv, true);
2523 2497
2524 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2498 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2525 struct iwl3945_rxon_cmd *active_rxon = 2499 struct iwl3945_rxon_cmd *active_rxon =
2526 (struct iwl3945_rxon_cmd *)(&ctx->active); 2500 (struct iwl3945_rxon_cmd *)(&ctx->active);
2527 2501
@@ -2529,11 +2503,11 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2529 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2503 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2530 } else { 2504 } else {
2531 /* Initialize our rx_config data */ 2505 /* Initialize our rx_config data */
2532 iwl_connection_init_rx_config(priv, ctx); 2506 iwl_legacy_connection_init_rx_config(priv, ctx);
2533 } 2507 }
2534 2508
2535 /* Configure Bluetooth device coexistence support */ 2509 /* Configure Bluetooth device coexistence support */
2536 priv->cfg->ops->hcmd->send_bt_config(priv); 2510 iwl_legacy_send_bt_config(priv);
2537 2511
2538 set_bit(STATUS_READY, &priv->status); 2512 set_bit(STATUS_READY, &priv->status);
2539 2513
@@ -2560,7 +2534,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
2560 2534
2561 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2535 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2562 2536
2563 iwl_scan_cancel_timeout(priv, 200); 2537 iwl_legacy_scan_cancel_timeout(priv, 200);
2564 2538
2565 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 2539 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2566 2540
@@ -2569,9 +2543,9 @@ static void __iwl3945_down(struct iwl_priv *priv)
2569 del_timer_sync(&priv->watchdog); 2543 del_timer_sync(&priv->watchdog);
2570 2544
2571 /* Station information will now be cleared in device */ 2545 /* Station information will now be cleared in device */
2572 iwl_clear_ucode_stations(priv, NULL); 2546 iwl_legacy_clear_ucode_stations(priv, NULL);
2573 iwl_dealloc_bcast_stations(priv); 2547 iwl_legacy_dealloc_bcast_stations(priv);
2574 iwl_clear_driver_stations(priv); 2548 iwl_legacy_clear_driver_stations(priv);
2575 2549
2576 /* Unblock any waiting calls */ 2550 /* Unblock any waiting calls */
2577 wake_up_interruptible_all(&priv->wait_command_queue); 2551 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2586,16 +2560,16 @@ static void __iwl3945_down(struct iwl_priv *priv)
2586 2560
2587 /* tell the device to stop sending interrupts */ 2561 /* tell the device to stop sending interrupts */
2588 spin_lock_irqsave(&priv->lock, flags); 2562 spin_lock_irqsave(&priv->lock, flags);
2589 iwl_disable_interrupts(priv); 2563 iwl_legacy_disable_interrupts(priv);
2590 spin_unlock_irqrestore(&priv->lock, flags); 2564 spin_unlock_irqrestore(&priv->lock, flags);
2591 iwl_synchronize_irq(priv); 2565 iwl3945_synchronize_irq(priv);
2592 2566
2593 if (priv->mac80211_registered) 2567 if (priv->mac80211_registered)
2594 ieee80211_stop_queues(priv->hw); 2568 ieee80211_stop_queues(priv->hw);
2595 2569
2596 /* If we have not previously called iwl3945_init() then 2570 /* If we have not previously called iwl3945_init() then
2597 * clear all bits but the RF Kill bits and return */ 2571 * clear all bits but the RF Kill bits and return */
2598 if (!iwl_is_init(priv)) { 2572 if (!iwl_legacy_is_init(priv)) {
2599 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 2573 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2600 STATUS_RF_KILL_HW | 2574 STATUS_RF_KILL_HW |
2601 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2575 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
@@ -2620,11 +2594,11 @@ static void __iwl3945_down(struct iwl_priv *priv)
2620 iwl3945_hw_rxq_stop(priv); 2594 iwl3945_hw_rxq_stop(priv);
2621 2595
2622 /* Power-down device's busmaster DMA clocks */ 2596 /* Power-down device's busmaster DMA clocks */
2623 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2597 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2624 udelay(5); 2598 udelay(5);
2625 2599
2626 /* Stop the device, and put it in low power state */ 2600 /* Stop the device, and put it in low power state */
2627 iwl_apm_stop(priv); 2601 iwl_legacy_apm_stop(priv);
2628 2602
2629 exit: 2603 exit:
2630 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2604 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2655,7 +2629,8 @@ static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2655 u8 sta_id; 2629 u8 sta_id;
2656 2630
2657 spin_lock_irqsave(&priv->sta_lock, flags); 2631 spin_lock_irqsave(&priv->sta_lock, flags);
2658 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); 2632 sta_id = iwl_legacy_prep_station(priv, ctx,
2633 iwlegacy_bcast_addr, false, NULL);
2659 if (sta_id == IWL_INVALID_STATION) { 2634 if (sta_id == IWL_INVALID_STATION) {
2660 IWL_ERR(priv, "Unable to prepare broadcast station\n"); 2635 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2661 spin_unlock_irqrestore(&priv->sta_lock, flags); 2636 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -2713,7 +2688,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
2713 2688
2714 /* clear (again), then enable host interrupts */ 2689 /* clear (again), then enable host interrupts */
2715 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2690 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2716 iwl_enable_interrupts(priv); 2691 iwl_legacy_enable_interrupts(priv);
2717 2692
2718 /* really make sure rfkill handshake bits are cleared */ 2693 /* really make sure rfkill handshake bits are cleared */
2719 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2694 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -2855,7 +2830,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2855 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2830 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2856 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 2831 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2857 2832
2858 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2833 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2859 u16 interval = 0; 2834 u16 interval = 0;
2860 u32 extra; 2835 u32 extra;
2861 u32 suspend_time = 100; 2836 u32 suspend_time = 100;
@@ -2943,7 +2918,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2943 2918
2944 if (!priv->is_internal_short_scan) { 2919 if (!priv->is_internal_short_scan) {
2945 scan->tx_cmd.len = cpu_to_le16( 2920 scan->tx_cmd.len = cpu_to_le16(
2946 iwl_fill_probe_req(priv, 2921 iwl_legacy_fill_probe_req(priv,
2947 (struct ieee80211_mgmt *)scan->data, 2922 (struct ieee80211_mgmt *)scan->data,
2948 vif->addr, 2923 vif->addr,
2949 priv->scan_request->ie, 2924 priv->scan_request->ie,
@@ -2952,9 +2927,9 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2952 } else { 2927 } else {
2953 /* use bcast addr, will not be transmitted but must be valid */ 2928 /* use bcast addr, will not be transmitted but must be valid */
2954 scan->tx_cmd.len = cpu_to_le16( 2929 scan->tx_cmd.len = cpu_to_le16(
2955 iwl_fill_probe_req(priv, 2930 iwl_legacy_fill_probe_req(priv,
2956 (struct ieee80211_mgmt *)scan->data, 2931 (struct ieee80211_mgmt *)scan->data,
2957 iwl_bcast_addr, NULL, 0, 2932 iwlegacy_bcast_addr, NULL, 0,
2958 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2933 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2959 } 2934 }
2960 /* select Rx antennas */ 2935 /* select Rx antennas */
@@ -2982,7 +2957,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2982 scan->len = cpu_to_le16(cmd.len); 2957 scan->len = cpu_to_le16(cmd.len);
2983 2958
2984 set_bit(STATUS_SCAN_HW, &priv->status); 2959 set_bit(STATUS_SCAN_HW, &priv->status);
2985 ret = iwl_send_cmd_sync(priv, &cmd); 2960 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
2986 if (ret) 2961 if (ret)
2987 clear_bit(STATUS_SCAN_HW, &priv->status); 2962 clear_bit(STATUS_SCAN_HW, &priv->status);
2988 return ret; 2963 return ret;
@@ -3050,25 +3025,20 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3050 if (!ctx->vif || !priv->is_open) 3025 if (!ctx->vif || !priv->is_open)
3051 return; 3026 return;
3052 3027
3053 if (ctx->vif->type == NL80211_IFTYPE_AP) {
3054 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3055 return;
3056 }
3057
3058 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3028 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3059 ctx->vif->bss_conf.aid, ctx->active.bssid_addr); 3029 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
3060 3030
3061 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3031 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3062 return; 3032 return;
3063 3033
3064 iwl_scan_cancel_timeout(priv, 200); 3034 iwl_legacy_scan_cancel_timeout(priv, 200);
3065 3035
3066 conf = ieee80211_get_hw_conf(priv->hw); 3036 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
3067 3037
3068 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3038 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3069 iwl3945_commit_rxon(priv, ctx); 3039 iwl3945_commit_rxon(priv, ctx);
3070 3040
3071 rc = iwl_send_rxon_timing(priv, ctx); 3041 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3072 if (rc) 3042 if (rc)
3073 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3043 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3074 "Attempting to continue.\n"); 3044 "Attempting to continue.\n");
@@ -3200,7 +3170,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3200 IWL_DEBUG_MAC80211(priv, "leave\n"); 3170 IWL_DEBUG_MAC80211(priv, "leave\n");
3201} 3171}
3202 3172
3203static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3173static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3204{ 3174{
3205 struct iwl_priv *priv = hw->priv; 3175 struct iwl_priv *priv = hw->priv;
3206 3176
@@ -3213,7 +3183,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3213 dev_kfree_skb_any(skb); 3183 dev_kfree_skb_any(skb);
3214 3184
3215 IWL_DEBUG_MAC80211(priv, "leave\n"); 3185 IWL_DEBUG_MAC80211(priv, "leave\n");
3216 return NETDEV_TX_OK;
3217} 3186}
3218 3187
3219void iwl3945_config_ap(struct iwl_priv *priv) 3188void iwl3945_config_ap(struct iwl_priv *priv)
@@ -3226,14 +3195,14 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3226 return; 3195 return;
3227 3196
3228 /* The following should be done only at AP bring up */ 3197 /* The following should be done only at AP bring up */
3229 if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) { 3198 if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
3230 3199
3231 /* RXON - unassoc (to set timing command) */ 3200 /* RXON - unassoc (to set timing command) */
3232 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3201 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3233 iwl3945_commit_rxon(priv, ctx); 3202 iwl3945_commit_rxon(priv, ctx);
3234 3203
3235 /* RXON Timing */ 3204 /* RXON Timing */
3236 rc = iwl_send_rxon_timing(priv, ctx); 3205 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3237 if (rc) 3206 if (rc)
3238 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3207 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3239 "Attempting to continue.\n"); 3208 "Attempting to continue.\n");
@@ -3260,10 +3229,6 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3260 iwl3945_commit_rxon(priv, ctx); 3229 iwl3945_commit_rxon(priv, ctx);
3261 } 3230 }
3262 iwl3945_send_beacon_cmd(priv); 3231 iwl3945_send_beacon_cmd(priv);
3263
3264 /* FIXME - we need to add code here to detect a totally new
3265 * configuration, reset the AP, unassoc, rxon timing, assoc,
3266 * clear sta table, add BCAST sta... */
3267} 3232}
3268 3233
3269static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3234static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3291,17 +3256,17 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3291 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 3256 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
3292 return -EOPNOTSUPP; 3257 return -EOPNOTSUPP;
3293 3258
3294 static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS); 3259 static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
3295 3260
3296 if (!static_key) { 3261 if (!static_key) {
3297 sta_id = iwl_sta_id_or_broadcast( 3262 sta_id = iwl_legacy_sta_id_or_broadcast(
3298 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); 3263 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
3299 if (sta_id == IWL_INVALID_STATION) 3264 if (sta_id == IWL_INVALID_STATION)
3300 return -EINVAL; 3265 return -EINVAL;
3301 } 3266 }
3302 3267
3303 mutex_lock(&priv->mutex); 3268 mutex_lock(&priv->mutex);
3304 iwl_scan_cancel_timeout(priv, 100); 3269 iwl_legacy_scan_cancel_timeout(priv, 100);
3305 3270
3306 switch (cmd) { 3271 switch (cmd) {
3307 case SET_KEY: 3272 case SET_KEY:
@@ -3346,7 +3311,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3346 sta_priv->common.sta_id = IWL_INVALID_STATION; 3311 sta_priv->common.sta_id = IWL_INVALID_STATION;
3347 3312
3348 3313
3349 ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS], 3314 ret = iwl_legacy_add_station_common(priv,
3315 &priv->contexts[IWL_RXON_CTX_BSS],
3350 sta->addr, is_ap, sta, &sta_id); 3316 sta->addr, is_ap, sta, &sta_id);
3351 if (ret) { 3317 if (ret) {
3352 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3318 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
@@ -3407,7 +3373,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3407 3373
3408 /* 3374 /*
3409 * Receiving all multicast frames is always enabled by the 3375 * Receiving all multicast frames is always enabled by the
3410 * default flags setup in iwl_connection_init_rx_config() 3376 * default flags setup in iwl_legacy_connection_init_rx_config()
3411 * since we currently do not support programming multicast 3377 * since we currently do not support programming multicast
3412 * filters into the device. 3378 * filters into the device.
3413 */ 3379 */
@@ -3422,7 +3388,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3422 * 3388 *
3423 *****************************************************************************/ 3389 *****************************************************************************/
3424 3390
3425#ifdef CONFIG_IWLWIFI_DEBUG 3391#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3426 3392
3427/* 3393/*
3428 * The following adds a new attribute to the sysfs representation 3394 * The following adds a new attribute to the sysfs representation
@@ -3435,13 +3401,13 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3435 * level that is used instead of the global debug level if it (the per 3401 * level that is used instead of the global debug level if it (the per
3436 * device debug level) is set. 3402 * device debug level) is set.
3437 */ 3403 */
3438static ssize_t show_debug_level(struct device *d, 3404static ssize_t iwl3945_show_debug_level(struct device *d,
3439 struct device_attribute *attr, char *buf) 3405 struct device_attribute *attr, char *buf)
3440{ 3406{
3441 struct iwl_priv *priv = dev_get_drvdata(d); 3407 struct iwl_priv *priv = dev_get_drvdata(d);
3442 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); 3408 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3443} 3409}
3444static ssize_t store_debug_level(struct device *d, 3410static ssize_t iwl3945_store_debug_level(struct device *d,
3445 struct device_attribute *attr, 3411 struct device_attribute *attr,
3446 const char *buf, size_t count) 3412 const char *buf, size_t count)
3447{ 3413{
@@ -3454,7 +3420,7 @@ static ssize_t store_debug_level(struct device *d,
3454 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); 3420 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3455 else { 3421 else {
3456 priv->debug_level = val; 3422 priv->debug_level = val;
3457 if (iwl_alloc_traffic_mem(priv)) 3423 if (iwl_legacy_alloc_traffic_mem(priv))
3458 IWL_ERR(priv, 3424 IWL_ERR(priv,
3459 "Not enough memory to generate traffic log\n"); 3425 "Not enough memory to generate traffic log\n");
3460 } 3426 }
@@ -3462,31 +3428,31 @@ static ssize_t store_debug_level(struct device *d,
3462} 3428}
3463 3429
3464static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, 3430static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3465 show_debug_level, store_debug_level); 3431 iwl3945_show_debug_level, iwl3945_store_debug_level);
3466 3432
3467#endif /* CONFIG_IWLWIFI_DEBUG */ 3433#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
3468 3434
3469static ssize_t show_temperature(struct device *d, 3435static ssize_t iwl3945_show_temperature(struct device *d,
3470 struct device_attribute *attr, char *buf) 3436 struct device_attribute *attr, char *buf)
3471{ 3437{
3472 struct iwl_priv *priv = dev_get_drvdata(d); 3438 struct iwl_priv *priv = dev_get_drvdata(d);
3473 3439
3474 if (!iwl_is_alive(priv)) 3440 if (!iwl_legacy_is_alive(priv))
3475 return -EAGAIN; 3441 return -EAGAIN;
3476 3442
3477 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); 3443 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3478} 3444}
3479 3445
3480static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 3446static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3481 3447
3482static ssize_t show_tx_power(struct device *d, 3448static ssize_t iwl3945_show_tx_power(struct device *d,
3483 struct device_attribute *attr, char *buf) 3449 struct device_attribute *attr, char *buf)
3484{ 3450{
3485 struct iwl_priv *priv = dev_get_drvdata(d); 3451 struct iwl_priv *priv = dev_get_drvdata(d);
3486 return sprintf(buf, "%d\n", priv->tx_power_user_lmt); 3452 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3487} 3453}
3488 3454
3489static ssize_t store_tx_power(struct device *d, 3455static ssize_t iwl3945_store_tx_power(struct device *d,
3490 struct device_attribute *attr, 3456 struct device_attribute *attr,
3491 const char *buf, size_t count) 3457 const char *buf, size_t count)
3492{ 3458{
@@ -3503,9 +3469,9 @@ static ssize_t store_tx_power(struct device *d,
3503 return count; 3469 return count;
3504} 3470}
3505 3471
3506static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 3472static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3507 3473
3508static ssize_t show_flags(struct device *d, 3474static ssize_t iwl3945_show_flags(struct device *d,
3509 struct device_attribute *attr, char *buf) 3475 struct device_attribute *attr, char *buf)
3510{ 3476{
3511 struct iwl_priv *priv = dev_get_drvdata(d); 3477 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3514,7 +3480,7 @@ static ssize_t show_flags(struct device *d,
3514 return sprintf(buf, "0x%04X\n", ctx->active.flags); 3480 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3515} 3481}
3516 3482
3517static ssize_t store_flags(struct device *d, 3483static ssize_t iwl3945_store_flags(struct device *d,
3518 struct device_attribute *attr, 3484 struct device_attribute *attr,
3519 const char *buf, size_t count) 3485 const char *buf, size_t count)
3520{ 3486{
@@ -3525,7 +3491,7 @@ static ssize_t store_flags(struct device *d,
3525 mutex_lock(&priv->mutex); 3491 mutex_lock(&priv->mutex);
3526 if (le32_to_cpu(ctx->staging.flags) != flags) { 3492 if (le32_to_cpu(ctx->staging.flags) != flags) {
3527 /* Cancel any currently running scans... */ 3493 /* Cancel any currently running scans... */
3528 if (iwl_scan_cancel_timeout(priv, 100)) 3494 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3529 IWL_WARN(priv, "Could not cancel scan.\n"); 3495 IWL_WARN(priv, "Could not cancel scan.\n");
3530 else { 3496 else {
3531 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", 3497 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
@@ -3539,9 +3505,9 @@ static ssize_t store_flags(struct device *d,
3539 return count; 3505 return count;
3540} 3506}
3541 3507
3542static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); 3508static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3543 3509
3544static ssize_t show_filter_flags(struct device *d, 3510static ssize_t iwl3945_show_filter_flags(struct device *d,
3545 struct device_attribute *attr, char *buf) 3511 struct device_attribute *attr, char *buf)
3546{ 3512{
3547 struct iwl_priv *priv = dev_get_drvdata(d); 3513 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3551,7 +3517,7 @@ static ssize_t show_filter_flags(struct device *d,
3551 le32_to_cpu(ctx->active.filter_flags)); 3517 le32_to_cpu(ctx->active.filter_flags));
3552} 3518}
3553 3519
3554static ssize_t store_filter_flags(struct device *d, 3520static ssize_t iwl3945_store_filter_flags(struct device *d,
3555 struct device_attribute *attr, 3521 struct device_attribute *attr,
3556 const char *buf, size_t count) 3522 const char *buf, size_t count)
3557{ 3523{
@@ -3562,7 +3528,7 @@ static ssize_t store_filter_flags(struct device *d,
3562 mutex_lock(&priv->mutex); 3528 mutex_lock(&priv->mutex);
3563 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { 3529 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3564 /* Cancel any currently running scans... */ 3530 /* Cancel any currently running scans... */
3565 if (iwl_scan_cancel_timeout(priv, 100)) 3531 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3566 IWL_WARN(priv, "Could not cancel scan.\n"); 3532 IWL_WARN(priv, "Could not cancel scan.\n");
3567 else { 3533 else {
3568 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " 3534 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
@@ -3577,10 +3543,10 @@ static ssize_t store_filter_flags(struct device *d,
3577 return count; 3543 return count;
3578} 3544}
3579 3545
3580static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3546static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
3581 store_filter_flags); 3547 iwl3945_store_filter_flags);
3582 3548
3583static ssize_t show_measurement(struct device *d, 3549static ssize_t iwl3945_show_measurement(struct device *d,
3584 struct device_attribute *attr, char *buf) 3550 struct device_attribute *attr, char *buf)
3585{ 3551{
3586 struct iwl_priv *priv = dev_get_drvdata(d); 3552 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3612,7 +3578,7 @@ static ssize_t show_measurement(struct device *d,
3612 return len; 3578 return len;
3613} 3579}
3614 3580
3615static ssize_t store_measurement(struct device *d, 3581static ssize_t iwl3945_store_measurement(struct device *d,
3616 struct device_attribute *attr, 3582 struct device_attribute *attr,
3617 const char *buf, size_t count) 3583 const char *buf, size_t count)
3618{ 3584{
@@ -3649,9 +3615,9 @@ static ssize_t store_measurement(struct device *d,
3649} 3615}
3650 3616
3651static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3617static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3652 show_measurement, store_measurement); 3618 iwl3945_show_measurement, iwl3945_store_measurement);
3653 3619
3654static ssize_t store_retry_rate(struct device *d, 3620static ssize_t iwl3945_store_retry_rate(struct device *d,
3655 struct device_attribute *attr, 3621 struct device_attribute *attr,
3656 const char *buf, size_t count) 3622 const char *buf, size_t count)
3657{ 3623{
@@ -3664,38 +3630,38 @@ static ssize_t store_retry_rate(struct device *d,
3664 return count; 3630 return count;
3665} 3631}
3666 3632
3667static ssize_t show_retry_rate(struct device *d, 3633static ssize_t iwl3945_show_retry_rate(struct device *d,
3668 struct device_attribute *attr, char *buf) 3634 struct device_attribute *attr, char *buf)
3669{ 3635{
3670 struct iwl_priv *priv = dev_get_drvdata(d); 3636 struct iwl_priv *priv = dev_get_drvdata(d);
3671 return sprintf(buf, "%d", priv->retry_rate); 3637 return sprintf(buf, "%d", priv->retry_rate);
3672} 3638}
3673 3639
3674static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, 3640static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3675 store_retry_rate); 3641 iwl3945_store_retry_rate);
3676 3642
3677 3643
3678static ssize_t show_channels(struct device *d, 3644static ssize_t iwl3945_show_channels(struct device *d,
3679 struct device_attribute *attr, char *buf) 3645 struct device_attribute *attr, char *buf)
3680{ 3646{
3681 /* all this shit doesn't belong into sysfs anyway */ 3647 /* all this shit doesn't belong into sysfs anyway */
3682 return 0; 3648 return 0;
3683} 3649}
3684 3650
3685static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 3651static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3686 3652
3687static ssize_t show_antenna(struct device *d, 3653static ssize_t iwl3945_show_antenna(struct device *d,
3688 struct device_attribute *attr, char *buf) 3654 struct device_attribute *attr, char *buf)
3689{ 3655{
3690 struct iwl_priv *priv = dev_get_drvdata(d); 3656 struct iwl_priv *priv = dev_get_drvdata(d);
3691 3657
3692 if (!iwl_is_alive(priv)) 3658 if (!iwl_legacy_is_alive(priv))
3693 return -EAGAIN; 3659 return -EAGAIN;
3694 3660
3695 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); 3661 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3696} 3662}
3697 3663
3698static ssize_t store_antenna(struct device *d, 3664static ssize_t iwl3945_store_antenna(struct device *d,
3699 struct device_attribute *attr, 3665 struct device_attribute *attr,
3700 const char *buf, size_t count) 3666 const char *buf, size_t count)
3701{ 3667{
@@ -3720,20 +3686,20 @@ static ssize_t store_antenna(struct device *d,
3720 return count; 3686 return count;
3721} 3687}
3722 3688
3723static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); 3689static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3724 3690
3725static ssize_t show_status(struct device *d, 3691static ssize_t iwl3945_show_status(struct device *d,
3726 struct device_attribute *attr, char *buf) 3692 struct device_attribute *attr, char *buf)
3727{ 3693{
3728 struct iwl_priv *priv = dev_get_drvdata(d); 3694 struct iwl_priv *priv = dev_get_drvdata(d);
3729 if (!iwl_is_alive(priv)) 3695 if (!iwl_legacy_is_alive(priv))
3730 return -EAGAIN; 3696 return -EAGAIN;
3731 return sprintf(buf, "0x%08x\n", (int)priv->status); 3697 return sprintf(buf, "0x%08x\n", (int)priv->status);
3732} 3698}
3733 3699
3734static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 3700static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3735 3701
3736static ssize_t dump_error_log(struct device *d, 3702static ssize_t iwl3945_dump_error_log(struct device *d,
3737 struct device_attribute *attr, 3703 struct device_attribute *attr,
3738 const char *buf, size_t count) 3704 const char *buf, size_t count)
3739{ 3705{
@@ -3746,7 +3712,7 @@ static ssize_t dump_error_log(struct device *d,
3746 return strnlen(buf, count); 3712 return strnlen(buf, count);
3747} 3713}
3748 3714
3749static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); 3715static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3750 3716
3751/***************************************************************************** 3717/*****************************************************************************
3752 * 3718 *
@@ -3762,18 +3728,17 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3762 3728
3763 INIT_WORK(&priv->restart, iwl3945_bg_restart); 3729 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3764 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 3730 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3765 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
3766 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3731 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3767 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3732 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3768 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); 3733 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3769 3734
3770 iwl_setup_scan_deferred_work(priv); 3735 iwl_legacy_setup_scan_deferred_work(priv);
3771 3736
3772 iwl3945_hw_setup_deferred_work(priv); 3737 iwl3945_hw_setup_deferred_work(priv);
3773 3738
3774 init_timer(&priv->watchdog); 3739 init_timer(&priv->watchdog);
3775 priv->watchdog.data = (unsigned long)priv; 3740 priv->watchdog.data = (unsigned long)priv;
3776 priv->watchdog.function = iwl_bg_watchdog; 3741 priv->watchdog.function = iwl_legacy_bg_watchdog;
3777 3742
3778 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3743 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3779 iwl3945_irq_tasklet, (unsigned long)priv); 3744 iwl3945_irq_tasklet, (unsigned long)priv);
@@ -3785,9 +3750,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3785 3750
3786 cancel_delayed_work_sync(&priv->init_alive_start); 3751 cancel_delayed_work_sync(&priv->init_alive_start);
3787 cancel_delayed_work(&priv->alive_start); 3752 cancel_delayed_work(&priv->alive_start);
3788 cancel_work_sync(&priv->beacon_update);
3789 3753
3790 iwl_cancel_scan_deferred_work(priv); 3754 iwl_legacy_cancel_scan_deferred_work(priv);
3791} 3755}
3792 3756
3793static struct attribute *iwl3945_sysfs_entries[] = { 3757static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3801,7 +3765,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3801 &dev_attr_status.attr, 3765 &dev_attr_status.attr,
3802 &dev_attr_temperature.attr, 3766 &dev_attr_temperature.attr,
3803 &dev_attr_tx_power.attr, 3767 &dev_attr_tx_power.attr,
3804#ifdef CONFIG_IWLWIFI_DEBUG 3768#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3805 &dev_attr_debug_level.attr, 3769 &dev_attr_debug_level.attr,
3806#endif 3770#endif
3807 NULL 3771 NULL
@@ -3816,19 +3780,19 @@ struct ieee80211_ops iwl3945_hw_ops = {
3816 .tx = iwl3945_mac_tx, 3780 .tx = iwl3945_mac_tx,
3817 .start = iwl3945_mac_start, 3781 .start = iwl3945_mac_start,
3818 .stop = iwl3945_mac_stop, 3782 .stop = iwl3945_mac_stop,
3819 .add_interface = iwl_mac_add_interface, 3783 .add_interface = iwl_legacy_mac_add_interface,
3820 .remove_interface = iwl_mac_remove_interface, 3784 .remove_interface = iwl_legacy_mac_remove_interface,
3821 .change_interface = iwl_mac_change_interface, 3785 .change_interface = iwl_legacy_mac_change_interface,
3822 .config = iwl_legacy_mac_config, 3786 .config = iwl_legacy_mac_config,
3823 .configure_filter = iwl3945_configure_filter, 3787 .configure_filter = iwl3945_configure_filter,
3824 .set_key = iwl3945_mac_set_key, 3788 .set_key = iwl3945_mac_set_key,
3825 .conf_tx = iwl_mac_conf_tx, 3789 .conf_tx = iwl_legacy_mac_conf_tx,
3826 .reset_tsf = iwl_legacy_mac_reset_tsf, 3790 .reset_tsf = iwl_legacy_mac_reset_tsf,
3827 .bss_info_changed = iwl_legacy_mac_bss_info_changed, 3791 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3828 .hw_scan = iwl_mac_hw_scan, 3792 .hw_scan = iwl_legacy_mac_hw_scan,
3829 .sta_add = iwl3945_mac_sta_add, 3793 .sta_add = iwl3945_mac_sta_add,
3830 .sta_remove = iwl_mac_sta_remove, 3794 .sta_remove = iwl_legacy_mac_sta_remove,
3831 .tx_last_beacon = iwl_mac_tx_last_beacon, 3795 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
3832}; 3796};
3833 3797
3834static int iwl3945_init_drv(struct iwl_priv *priv) 3798static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3870,7 +3834,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3870 ret = -EINVAL; 3834 ret = -EINVAL;
3871 goto err; 3835 goto err;
3872 } 3836 }
3873 ret = iwl_init_channel_map(priv); 3837 ret = iwl_legacy_init_channel_map(priv);
3874 if (ret) { 3838 if (ret) {
3875 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); 3839 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3876 goto err; 3840 goto err;
@@ -3882,7 +3846,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3882 goto err_free_channel_map; 3846 goto err_free_channel_map;
3883 } 3847 }
3884 3848
3885 ret = iwlcore_init_geos(priv); 3849 ret = iwl_legacy_init_geos(priv);
3886 if (ret) { 3850 if (ret) {
3887 IWL_ERR(priv, "initializing geos failed: %d\n", ret); 3851 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3888 goto err_free_channel_map; 3852 goto err_free_channel_map;
@@ -3892,7 +3856,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3892 return 0; 3856 return 0;
3893 3857
3894err_free_channel_map: 3858err_free_channel_map:
3895 iwl_free_channel_map(priv); 3859 iwl_legacy_free_channel_map(priv);
3896err: 3860err:
3897 return ret; 3861 return ret;
3898} 3862}
@@ -3912,10 +3876,6 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3912 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3876 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3913 IEEE80211_HW_SPECTRUM_MGMT; 3877 IEEE80211_HW_SPECTRUM_MGMT;
3914 3878
3915 if (!priv->cfg->base_params->broken_powersave)
3916 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3917 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3918
3919 hw->wiphy->interface_modes = 3879 hw->wiphy->interface_modes =
3920 priv->contexts[IWL_RXON_CTX_BSS].interface_modes; 3880 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3921 3881
@@ -3938,7 +3898,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3938 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3898 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3939 &priv->bands[IEEE80211_BAND_5GHZ]; 3899 &priv->bands[IEEE80211_BAND_5GHZ];
3940 3900
3941 iwl_leds_init(priv); 3901 iwl_legacy_leds_init(priv);
3942 3902
3943 ret = ieee80211_register_hw(priv->hw); 3903 ret = ieee80211_register_hw(priv->hw);
3944 if (ret) { 3904 if (ret) {
@@ -3965,7 +3925,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3965 3925
3966 /* mac80211 allocates memory for this device instance, including 3926 /* mac80211 allocates memory for this device instance, including
3967 * space for this driver's private structure */ 3927 * space for this driver's private structure */
3968 hw = iwl_alloc_all(cfg); 3928 hw = iwl_legacy_alloc_all(cfg);
3969 if (hw == NULL) { 3929 if (hw == NULL) {
3970 pr_err("Can not allocate network device\n"); 3930 pr_err("Can not allocate network device\n");
3971 err = -ENOMEM; 3931 err = -ENOMEM;
@@ -4005,13 +3965,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4005 iwl3945_hw_ops.hw_scan = NULL; 3965 iwl3945_hw_ops.hw_scan = NULL;
4006 } 3966 }
4007 3967
4008
4009 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 3968 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
4010 priv->cfg = cfg; 3969 priv->cfg = cfg;
4011 priv->pci_dev = pdev; 3970 priv->pci_dev = pdev;
4012 priv->inta_mask = CSR_INI_SET_MASK; 3971 priv->inta_mask = CSR_INI_SET_MASK;
4013 3972
4014 if (iwl_alloc_traffic_mem(priv)) 3973 if (iwl_legacy_alloc_traffic_mem(priv))
4015 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 3974 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
4016 3975
4017 /*************************** 3976 /***************************
@@ -4075,7 +4034,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4075 * ********************/ 4034 * ********************/
4076 4035
4077 /* Read the EEPROM */ 4036 /* Read the EEPROM */
4078 err = iwl_eeprom_init(priv); 4037 err = iwl_legacy_eeprom_init(priv);
4079 if (err) { 4038 if (err) {
4080 IWL_ERR(priv, "Unable to init EEPROM\n"); 4039 IWL_ERR(priv, "Unable to init EEPROM\n");
4081 goto out_iounmap; 4040 goto out_iounmap;
@@ -4112,12 +4071,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4112 * ********************/ 4071 * ********************/
4113 4072
4114 spin_lock_irqsave(&priv->lock, flags); 4073 spin_lock_irqsave(&priv->lock, flags);
4115 iwl_disable_interrupts(priv); 4074 iwl_legacy_disable_interrupts(priv);
4116 spin_unlock_irqrestore(&priv->lock, flags); 4075 spin_unlock_irqrestore(&priv->lock, flags);
4117 4076
4118 pci_enable_msi(priv->pci_dev); 4077 pci_enable_msi(priv->pci_dev);
4119 4078
4120 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr, 4079 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
4121 IRQF_SHARED, DRV_NAME, priv); 4080 IRQF_SHARED, DRV_NAME, priv);
4122 if (err) { 4081 if (err) {
4123 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4082 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4130,24 +4089,24 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4130 goto out_release_irq; 4089 goto out_release_irq;
4131 } 4090 }
4132 4091
4133 iwl_set_rxon_channel(priv, 4092 iwl_legacy_set_rxon_channel(priv,
4134 &priv->bands[IEEE80211_BAND_2GHZ].channels[5], 4093 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
4135 &priv->contexts[IWL_RXON_CTX_BSS]); 4094 &priv->contexts[IWL_RXON_CTX_BSS]);
4136 iwl3945_setup_deferred_work(priv); 4095 iwl3945_setup_deferred_work(priv);
4137 iwl3945_setup_rx_handlers(priv); 4096 iwl3945_setup_rx_handlers(priv);
4138 iwl_power_initialize(priv); 4097 iwl_legacy_power_initialize(priv);
4139 4098
4140 /********************************* 4099 /*********************************
4141 * 8. Setup and Register mac80211 4100 * 8. Setup and Register mac80211
4142 * *******************************/ 4101 * *******************************/
4143 4102
4144 iwl_enable_interrupts(priv); 4103 iwl_legacy_enable_interrupts(priv);
4145 4104
4146 err = iwl3945_setup_mac(priv); 4105 err = iwl3945_setup_mac(priv);
4147 if (err) 4106 if (err)
4148 goto out_remove_sysfs; 4107 goto out_remove_sysfs;
4149 4108
4150 err = iwl_dbgfs_register(priv, DRV_NAME); 4109 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
4151 if (err) 4110 if (err)
4152 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); 4111 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4153 4112
@@ -4165,12 +4124,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4165 free_irq(priv->pci_dev->irq, priv); 4124 free_irq(priv->pci_dev->irq, priv);
4166 out_disable_msi: 4125 out_disable_msi:
4167 pci_disable_msi(priv->pci_dev); 4126 pci_disable_msi(priv->pci_dev);
4168 iwlcore_free_geos(priv); 4127 iwl_legacy_free_geos(priv);
4169 iwl_free_channel_map(priv); 4128 iwl_legacy_free_channel_map(priv);
4170 out_unset_hw_params: 4129 out_unset_hw_params:
4171 iwl3945_unset_hw_params(priv); 4130 iwl3945_unset_hw_params(priv);
4172 out_eeprom_free: 4131 out_eeprom_free:
4173 iwl_eeprom_free(priv); 4132 iwl_legacy_eeprom_free(priv);
4174 out_iounmap: 4133 out_iounmap:
4175 pci_iounmap(pdev, priv->hw_base); 4134 pci_iounmap(pdev, priv->hw_base);
4176 out_pci_release_regions: 4135 out_pci_release_regions:
@@ -4179,7 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4179 pci_set_drvdata(pdev, NULL); 4138 pci_set_drvdata(pdev, NULL);
4180 pci_disable_device(pdev); 4139 pci_disable_device(pdev);
4181 out_ieee80211_free_hw: 4140 out_ieee80211_free_hw:
4182 iwl_free_traffic_mem(priv); 4141 iwl_legacy_free_traffic_mem(priv);
4183 ieee80211_free_hw(priv->hw); 4142 ieee80211_free_hw(priv->hw);
4184 out: 4143 out:
4185 return err; 4144 return err;
@@ -4195,11 +4154,11 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4195 4154
4196 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 4155 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
4197 4156
4198 iwl_dbgfs_unregister(priv); 4157 iwl_legacy_dbgfs_unregister(priv);
4199 4158
4200 set_bit(STATUS_EXIT_PENDING, &priv->status); 4159 set_bit(STATUS_EXIT_PENDING, &priv->status);
4201 4160
4202 iwl_leds_exit(priv); 4161 iwl_legacy_leds_exit(priv);
4203 4162
4204 if (priv->mac80211_registered) { 4163 if (priv->mac80211_registered) {
4205 ieee80211_unregister_hw(priv->hw); 4164 ieee80211_unregister_hw(priv->hw);
@@ -4215,16 +4174,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4215 * paths to avoid running iwl_down() at all before leaving driver. 4174 * paths to avoid running iwl_down() at all before leaving driver.
4216 * This (inexpensive) call *makes sure* device is reset. 4175 * This (inexpensive) call *makes sure* device is reset.
4217 */ 4176 */
4218 iwl_apm_stop(priv); 4177 iwl_legacy_apm_stop(priv);
4219 4178
4220 /* make sure we flush any pending irq or 4179 /* make sure we flush any pending irq or
4221 * tasklet for the driver 4180 * tasklet for the driver
4222 */ 4181 */
4223 spin_lock_irqsave(&priv->lock, flags); 4182 spin_lock_irqsave(&priv->lock, flags);
4224 iwl_disable_interrupts(priv); 4183 iwl_legacy_disable_interrupts(priv);
4225 spin_unlock_irqrestore(&priv->lock, flags); 4184 spin_unlock_irqrestore(&priv->lock, flags);
4226 4185
4227 iwl_synchronize_irq(priv); 4186 iwl3945_synchronize_irq(priv);
4228 4187
4229 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4188 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4230 4189
@@ -4246,7 +4205,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4246 * until now... */ 4205 * until now... */
4247 destroy_workqueue(priv->workqueue); 4206 destroy_workqueue(priv->workqueue);
4248 priv->workqueue = NULL; 4207 priv->workqueue = NULL;
4249 iwl_free_traffic_mem(priv); 4208 iwl_legacy_free_traffic_mem(priv);
4250 4209
4251 free_irq(pdev->irq, priv); 4210 free_irq(pdev->irq, priv);
4252 pci_disable_msi(pdev); 4211 pci_disable_msi(pdev);
@@ -4256,8 +4215,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4256 pci_disable_device(pdev); 4215 pci_disable_device(pdev);
4257 pci_set_drvdata(pdev, NULL); 4216 pci_set_drvdata(pdev, NULL);
4258 4217
4259 iwl_free_channel_map(priv); 4218 iwl_legacy_free_channel_map(priv);
4260 iwlcore_free_geos(priv); 4219 iwl_legacy_free_geos(priv);
4261 kfree(priv->scan_cmd); 4220 kfree(priv->scan_cmd);
4262 if (priv->beacon_skb) 4221 if (priv->beacon_skb)
4263 dev_kfree_skb(priv->beacon_skb); 4222 dev_kfree_skb(priv->beacon_skb);
@@ -4277,7 +4236,7 @@ static struct pci_driver iwl3945_driver = {
4277 .id_table = iwl3945_hw_card_ids, 4236 .id_table = iwl3945_hw_card_ids,
4278 .probe = iwl3945_pci_probe, 4237 .probe = iwl3945_pci_probe,
4279 .remove = __devexit_p(iwl3945_pci_remove), 4238 .remove = __devexit_p(iwl3945_pci_remove),
4280 .driver.pm = IWL_PM_OPS, 4239 .driver.pm = IWL_LEGACY_PM_OPS,
4281}; 4240};
4282 4241
4283static int __init iwl3945_init(void) 4242static int __init iwl3945_init(void)
@@ -4318,17 +4277,17 @@ module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4318MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 4277MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4319module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); 4278module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4320MODULE_PARM_DESC(swcrypto, 4279MODULE_PARM_DESC(swcrypto,
4321 "using software crypto (default 1 [software])\n"); 4280 "using software crypto (default 1 [software])");
4322#ifdef CONFIG_IWLWIFI_DEBUG
4323module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4324MODULE_PARM_DESC(debug, "debug output mask");
4325#endif
4326module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, 4281module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4327 int, S_IRUGO); 4282 int, S_IRUGO);
4328MODULE_PARM_DESC(disable_hw_scan, 4283MODULE_PARM_DESC(disable_hw_scan,
4329 "disable hardware scanning (default 0) (deprecated)"); 4284 "disable hardware scanning (default 0) (deprecated)");
4330module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); 4285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
4331MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); 4286module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
4287MODULE_PARM_DESC(debug, "debug output mask");
4288#endif
4289module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4290MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4332 4291
4333module_exit(iwl3945_exit); 4292module_exit(iwl3945_exit);
4334module_init(iwl3945_init); 4293module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644
index 000000000000..91b3d8b9d7a5
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -0,0 +1,3632 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <linux/firmware.h>
45#include <linux/etherdevice.h>
46#include <linux/if_arp.h>
47
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl4965"
53
54#include "iwl-eeprom.h"
55#include "iwl-dev.h"
56#include "iwl-core.h"
57#include "iwl-io.h"
58#include "iwl-helpers.h"
59#include "iwl-sta.h"
60#include "iwl-4965-calib.h"
61#include "iwl-4965.h"
62#include "iwl-4965-led.h"
63
64
65/******************************************************************************
66 *
67 * module boiler plate
68 *
69 ******************************************************************************/
70
71/*
72 * module name, copyright, version, etc.
73 */
74#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
75
76#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
77#define VD "d"
78#else
79#define VD
80#endif
81
82#define DRV_VERSION IWLWIFI_VERSION VD
83
84
85MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
90
91void iwl4965_update_chain_flags(struct iwl_priv *priv)
92{
93 struct iwl_rxon_context *ctx;
94
95 if (priv->cfg->ops->hcmd->set_rxon_chain) {
96 for_each_context(priv, ctx) {
97 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
98 if (ctx->active.rx_chain != ctx->staging.rx_chain)
99 iwl_legacy_commit_rxon(priv, ctx);
100 }
101 }
102}
103
104static void iwl4965_clear_free_frames(struct iwl_priv *priv)
105{
106 struct list_head *element;
107
108 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
109 priv->frames_count);
110
111 while (!list_empty(&priv->free_frames)) {
112 element = priv->free_frames.next;
113 list_del(element);
114 kfree(list_entry(element, struct iwl_frame, list));
115 priv->frames_count--;
116 }
117
118 if (priv->frames_count) {
119 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
120 priv->frames_count);
121 priv->frames_count = 0;
122 }
123}
124
125static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
126{
127 struct iwl_frame *frame;
128 struct list_head *element;
129 if (list_empty(&priv->free_frames)) {
130 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
131 if (!frame) {
132 IWL_ERR(priv, "Could not allocate frame!\n");
133 return NULL;
134 }
135
136 priv->frames_count++;
137 return frame;
138 }
139
140 element = priv->free_frames.next;
141 list_del(element);
142 return list_entry(element, struct iwl_frame, list);
143}
144
145static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
146{
147 memset(frame, 0, sizeof(*frame));
148 list_add(&frame->list, &priv->free_frames);
149}
150
151static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
152 struct ieee80211_hdr *hdr,
153 int left)
154{
155 lockdep_assert_held(&priv->mutex);
156
157 if (!priv->beacon_skb)
158 return 0;
159
160 if (priv->beacon_skb->len > left)
161 return 0;
162
163 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
164
165 return priv->beacon_skb->len;
166}
167
168/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
169static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
170 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
171 u8 *beacon, u32 frame_size)
172{
173 u16 tim_idx;
174 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
175
176 /*
177 * The index is relative to frame start but we start looking at the
178 * variable-length part of the beacon.
179 */
180 tim_idx = mgmt->u.beacon.variable - beacon;
181
182 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
183 while ((tim_idx < (frame_size - 2)) &&
184 (beacon[tim_idx] != WLAN_EID_TIM))
185 tim_idx += beacon[tim_idx+1] + 2;
186
187 /* If TIM field was found, set variables */
188 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
189 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
190 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
191 } else
192 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
193}
194
195static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
196 struct iwl_frame *frame)
197{
198 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
199 u32 frame_size;
200 u32 rate_flags;
201 u32 rate;
202 /*
203 * We have to set up the TX command, the TX Beacon command, and the
204 * beacon contents.
205 */
206
207 lockdep_assert_held(&priv->mutex);
208
209 if (!priv->beacon_ctx) {
210 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
211 return 0;
212 }
213
214 /* Initialize memory */
215 tx_beacon_cmd = &frame->u.beacon;
216 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
217
218 /* Set up TX beacon contents */
219 frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
220 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
221 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
222 return 0;
223 if (!frame_size)
224 return 0;
225
226 /* Set up TX command fields */
227 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
228 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
229 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
230 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
231 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
232
233 /* Set up TX beacon command fields */
234 iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
235 frame_size);
236
237 /* Set up packet rate and flags */
238 rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
239 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
240 priv->hw_params.valid_tx_ant);
241 rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
242 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
243 rate_flags |= RATE_MCS_CCK_MSK;
244 tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
245 rate_flags);
246
247 return sizeof(*tx_beacon_cmd) + frame_size;
248}
249
250int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
251{
252 struct iwl_frame *frame;
253 unsigned int frame_size;
254 int rc;
255
256 frame = iwl4965_get_free_frame(priv);
257 if (!frame) {
258 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
259 "command.\n");
260 return -ENOMEM;
261 }
262
263 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
264 if (!frame_size) {
265 IWL_ERR(priv, "Error configuring the beacon command\n");
266 iwl4965_free_frame(priv, frame);
267 return -EINVAL;
268 }
269
270 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
271 &frame->u.cmd[0]);
272
273 iwl4965_free_frame(priv, frame);
274
275 return rc;
276}
277
278static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
279{
280 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
281
282 dma_addr_t addr = get_unaligned_le32(&tb->lo);
283 if (sizeof(dma_addr_t) > sizeof(u32))
284 addr |=
285 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
286
287 return addr;
288}
289
290static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
291{
292 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
293
294 return le16_to_cpu(tb->hi_n_len) >> 4;
295}
296
297static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
298 dma_addr_t addr, u16 len)
299{
300 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
301 u16 hi_n_len = len << 4;
302
303 put_unaligned_le32(addr, &tb->lo);
304 if (sizeof(dma_addr_t) > sizeof(u32))
305 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
306
307 tb->hi_n_len = cpu_to_le16(hi_n_len);
308
309 tfd->num_tbs = idx + 1;
310}
311
312static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
313{
314 return tfd->num_tbs & 0x1f;
315}
316
317/**
318 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
319 * @priv - driver private data
320 * @txq - tx queue
321 *
322 * Does NOT advance any TFD circular buffer read/write indexes
323 * Does NOT free the TFD itself (which is within circular buffer)
324 */
325void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
326{
327 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
328 struct iwl_tfd *tfd;
329 struct pci_dev *dev = priv->pci_dev;
330 int index = txq->q.read_ptr;
331 int i;
332 int num_tbs;
333
334 tfd = &tfd_tmp[index];
335
336 /* Sanity check on number of chunks */
337 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
338
339 if (num_tbs >= IWL_NUM_OF_TBS) {
340 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
341 /* @todo issue fatal error, it is quite serious situation */
342 return;
343 }
344
345 /* Unmap tx_cmd */
346 if (num_tbs)
347 pci_unmap_single(dev,
348 dma_unmap_addr(&txq->meta[index], mapping),
349 dma_unmap_len(&txq->meta[index], len),
350 PCI_DMA_BIDIRECTIONAL);
351
352 /* Unmap chunks, if any. */
353 for (i = 1; i < num_tbs; i++)
354 pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
355 iwl4965_tfd_tb_get_len(tfd, i),
356 PCI_DMA_TODEVICE);
357
358 /* free SKB */
359 if (txq->txb) {
360 struct sk_buff *skb;
361
362 skb = txq->txb[txq->q.read_ptr].skb;
363
364 /* can be called from irqs-disabled context */
365 if (skb) {
366 dev_kfree_skb_any(skb);
367 txq->txb[txq->q.read_ptr].skb = NULL;
368 }
369 }
370}
371
372int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq,
374 dma_addr_t addr, u16 len,
375 u8 reset, u8 pad)
376{
377 struct iwl_queue *q;
378 struct iwl_tfd *tfd, *tfd_tmp;
379 u32 num_tbs;
380
381 q = &txq->q;
382 tfd_tmp = (struct iwl_tfd *)txq->tfds;
383 tfd = &tfd_tmp[q->write_ptr];
384
385 if (reset)
386 memset(tfd, 0, sizeof(*tfd));
387
388 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
389
390 /* Each TFD can point to a maximum 20 Tx buffers */
391 if (num_tbs >= IWL_NUM_OF_TBS) {
392 IWL_ERR(priv, "Error can not send more than %d chunks\n",
393 IWL_NUM_OF_TBS);
394 return -EINVAL;
395 }
396
397 BUG_ON(addr & ~DMA_BIT_MASK(36));
398 if (unlikely(addr & ~IWL_TX_DMA_MASK))
399 IWL_ERR(priv, "Unaligned address = %llx\n",
400 (unsigned long long)addr);
401
402 iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
403
404 return 0;
405}
406
407/*
408 * Tell nic where to find circular buffer of Tx Frame Descriptors for
409 * given Tx queue, and enable the DMA channel used for that queue.
410 *
411 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
412 * channels supported in hardware.
413 */
414int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
415 struct iwl_tx_queue *txq)
416{
417 int txq_id = txq->q.id;
418
419 /* Circular buffer (TFD queue in DRAM) physical base address */
420 iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
421 txq->q.dma_addr >> 8);
422
423 return 0;
424}
425
426/******************************************************************************
427 *
428 * Generic RX handler implementations
429 *
430 ******************************************************************************/
431static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
432 struct iwl_rx_mem_buffer *rxb)
433{
434 struct iwl_rx_packet *pkt = rxb_addr(rxb);
435 struct iwl_alive_resp *palive;
436 struct delayed_work *pwork;
437
438 palive = &pkt->u.alive_frame;
439
440 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
441 "0x%01X 0x%01X\n",
442 palive->is_valid, palive->ver_type,
443 palive->ver_subtype);
444
445 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
446 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
447 memcpy(&priv->card_alive_init,
448 &pkt->u.alive_frame,
449 sizeof(struct iwl_init_alive_resp));
450 pwork = &priv->init_alive_start;
451 } else {
452 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
453 memcpy(&priv->card_alive, &pkt->u.alive_frame,
454 sizeof(struct iwl_alive_resp));
455 pwork = &priv->alive_start;
456 }
457
458 /* We delay the ALIVE response by 5ms to
459 * give the HW RF Kill time to activate... */
460 if (palive->is_valid == UCODE_VALID_OK)
461 queue_delayed_work(priv->workqueue, pwork,
462 msecs_to_jiffies(5));
463 else
464 IWL_WARN(priv, "uCode did not respond OK.\n");
465}
466
467/**
468 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
469 *
470 * This callback is provided in order to send a statistics request.
471 *
472 * This timer function is continually reset to execute within
473 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
474 * was received. We need to ensure we receive the statistics in order
475 * to update the temperature used for calibrating the TXPOWER.
476 */
477static void iwl4965_bg_statistics_periodic(unsigned long data)
478{
479 struct iwl_priv *priv = (struct iwl_priv *)data;
480
481 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
482 return;
483
484 /* dont send host command if rf-kill is on */
485 if (!iwl_legacy_is_ready_rf(priv))
486 return;
487
488 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
489}
490
491
492static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
493 u32 start_idx, u32 num_events,
494 u32 mode)
495{
496 u32 i;
497 u32 ptr; /* SRAM byte address of log data */
498 u32 ev, time, data; /* event log data */
499 unsigned long reg_flags;
500
501 if (mode == 0)
502 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
503 else
504 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
505
506 /* Make sure device is powered up for SRAM reads */
507 spin_lock_irqsave(&priv->reg_lock, reg_flags);
508 if (iwl_grab_nic_access(priv)) {
509 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
510 return;
511 }
512
513 /* Set starting address; reads will auto-increment */
514 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
515 rmb();
516
517 /*
518 * "time" is actually "data" for mode 0 (no timestamp).
519 * place event id # at far right for easier visual parsing.
520 */
521 for (i = 0; i < num_events; i++) {
522 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
523 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
524 if (mode == 0) {
525 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
526 0, time, ev);
527 } else {
528 data = _iwl_legacy_read_direct32(priv,
529 HBUS_TARG_MEM_RDAT);
530 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
531 time, data, ev);
532 }
533 }
534 /* Allow device to power down */
535 iwl_release_nic_access(priv);
536 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
537}
538
539static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
540{
541 u32 capacity; /* event log capacity in # entries */
542 u32 base; /* SRAM byte address of event log header */
543 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
544 u32 num_wraps; /* # times uCode wrapped to top of log */
545 u32 next_entry; /* index of next entry to be written by uCode */
546
547 if (priv->ucode_type == UCODE_INIT)
548 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
549 else
550 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
551 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
552 capacity = iwl_legacy_read_targ_mem(priv, base);
553 num_wraps = iwl_legacy_read_targ_mem(priv,
554 base + (2 * sizeof(u32)));
555 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
556 next_entry = iwl_legacy_read_targ_mem(priv,
557 base + (3 * sizeof(u32)));
558 } else
559 return;
560
561 if (num_wraps == priv->event_log.num_wraps) {
562 iwl4965_print_cont_event_trace(priv,
563 base, priv->event_log.next_entry,
564 next_entry - priv->event_log.next_entry,
565 mode);
566 priv->event_log.non_wraps_count++;
567 } else {
568 if ((num_wraps - priv->event_log.num_wraps) > 1)
569 priv->event_log.wraps_more_count++;
570 else
571 priv->event_log.wraps_once_count++;
572 trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
573 num_wraps - priv->event_log.num_wraps,
574 next_entry, priv->event_log.next_entry);
575 if (next_entry < priv->event_log.next_entry) {
576 iwl4965_print_cont_event_trace(priv, base,
577 priv->event_log.next_entry,
578 capacity - priv->event_log.next_entry,
579 mode);
580
581 iwl4965_print_cont_event_trace(priv, base, 0,
582 next_entry, mode);
583 } else {
584 iwl4965_print_cont_event_trace(priv, base,
585 next_entry, capacity - next_entry,
586 mode);
587
588 iwl4965_print_cont_event_trace(priv, base, 0,
589 next_entry, mode);
590 }
591 }
592 priv->event_log.num_wraps = num_wraps;
593 priv->event_log.next_entry = next_entry;
594}
595
596/**
597 * iwl4965_bg_ucode_trace - Timer callback to log ucode event
598 *
599 * The timer is continually set to execute every
600 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
601 * this function is to perform continuous uCode event logging operation
602 * if enabled
603 */
604static void iwl4965_bg_ucode_trace(unsigned long data)
605{
606 struct iwl_priv *priv = (struct iwl_priv *)data;
607
608 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
609 return;
610
611 if (priv->event_log.ucode_trace) {
612 iwl4965_continuous_event_trace(priv);
613 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
614 mod_timer(&priv->ucode_trace,
615 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
616 }
617}
618
619static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
620 struct iwl_rx_mem_buffer *rxb)
621{
622 struct iwl_rx_packet *pkt = rxb_addr(rxb);
623 struct iwl4965_beacon_notif *beacon =
624 (struct iwl4965_beacon_notif *)pkt->u.raw;
625#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
626 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
627
628 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
629 "tsf %d %d rate %d\n",
630 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
631 beacon->beacon_notify_hdr.failure_frame,
632 le32_to_cpu(beacon->ibss_mgr_status),
633 le32_to_cpu(beacon->high_tsf),
634 le32_to_cpu(beacon->low_tsf), rate);
635#endif
636
637 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
638}
639
640static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
641{
642 unsigned long flags;
643
644 IWL_DEBUG_POWER(priv, "Stop all queues\n");
645
646 if (priv->mac80211_registered)
647 ieee80211_stop_queues(priv->hw);
648
649 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
650 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
651 iwl_read32(priv, CSR_UCODE_DRV_GP1);
652
653 spin_lock_irqsave(&priv->reg_lock, flags);
654 if (!iwl_grab_nic_access(priv))
655 iwl_release_nic_access(priv);
656 spin_unlock_irqrestore(&priv->reg_lock, flags);
657}
658
659/* Handle notification from uCode that card's power state is changing
660 * due to software, hardware, or critical temperature RFKILL */
661static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
662 struct iwl_rx_mem_buffer *rxb)
663{
664 struct iwl_rx_packet *pkt = rxb_addr(rxb);
665 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
666 unsigned long status = priv->status;
667
668 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
669 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
670 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
671 (flags & CT_CARD_DISABLED) ?
672 "Reached" : "Not reached");
673
674 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
675 CT_CARD_DISABLED)) {
676
677 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
678 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
679
680 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
681 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
682
683 if (!(flags & RXON_CARD_DISABLED)) {
684 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
685 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
686 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
687 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
688 }
689 }
690
691 if (flags & CT_CARD_DISABLED)
692 iwl4965_perform_ct_kill_task(priv);
693
694 if (flags & HW_CARD_DISABLED)
695 set_bit(STATUS_RF_KILL_HW, &priv->status);
696 else
697 clear_bit(STATUS_RF_KILL_HW, &priv->status);
698
699 if (!(flags & RXON_CARD_DISABLED))
700 iwl_legacy_scan_cancel(priv);
701
702 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
703 test_bit(STATUS_RF_KILL_HW, &priv->status)))
704 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
705 test_bit(STATUS_RF_KILL_HW, &priv->status));
706 else
707 wake_up_interruptible(&priv->wait_command_queue);
708}
709
710/**
711 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
712 *
713 * Setup the RX handlers for each of the reply types sent from the uCode
714 * to the host.
715 *
716 * This function chains into the hardware specific files for them to setup
717 * any hardware specific handlers as well.
718 */
719static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
720{
721 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
722 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
723 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
724 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
725 iwl_legacy_rx_spectrum_measure_notif;
726 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
727 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
728 iwl_legacy_rx_pm_debug_statistics_notif;
729 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
730
731 /*
732 * The same handler is used for both the REPLY to a discrete
733 * statistics request from the host as well as for the periodic
734 * statistics notifications (after received beacons) from the uCode.
735 */
736 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
737 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
738
739 iwl_legacy_setup_rx_scan_handlers(priv);
740
741 /* status change handler */
742 priv->rx_handlers[CARD_STATE_NOTIFICATION] =
743 iwl4965_rx_card_state_notif;
744
745 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
746 iwl4965_rx_missed_beacon_notif;
747 /* Rx handlers */
748 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
749 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
750 /* block ack */
751 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
752 /* Set up hardware specific Rx handlers */
753 priv->cfg->ops->lib->rx_handler_setup(priv);
754}
755
756/**
757 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
758 *
759 * Uses the priv->rx_handlers callback function array to invoke
760 * the appropriate handlers, including command responses,
761 * frame-received notifications, and other notifications.
762 */
763void iwl4965_rx_handle(struct iwl_priv *priv)
764{
765 struct iwl_rx_mem_buffer *rxb;
766 struct iwl_rx_packet *pkt;
767 struct iwl_rx_queue *rxq = &priv->rxq;
768 u32 r, i;
769 int reclaim;
770 unsigned long flags;
771 u8 fill_rx = 0;
772 u32 count = 8;
773 int total_empty;
774
775 /* uCode's read index (stored in shared DRAM) indicates the last Rx
776 * buffer that the driver may process (last buffer filled by ucode). */
777 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
778 i = rxq->read;
779
780 /* Rx interrupt, but nothing sent from uCode */
781 if (i == r)
782 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
783
784 /* calculate total frames need to be restock after handling RX */
785 total_empty = r - rxq->write_actual;
786 if (total_empty < 0)
787 total_empty += RX_QUEUE_SIZE;
788
789 if (total_empty > (RX_QUEUE_SIZE / 2))
790 fill_rx = 1;
791
792 while (i != r) {
793 int len;
794
795 rxb = rxq->queue[i];
796
797 /* If an RXB doesn't have a Rx queue slot associated with it,
798 * then a bug has been introduced in the queue refilling
799 * routines -- catch it here */
800 BUG_ON(rxb == NULL);
801
802 rxq->queue[i] = NULL;
803
804 pci_unmap_page(priv->pci_dev, rxb->page_dma,
805 PAGE_SIZE << priv->hw_params.rx_page_order,
806 PCI_DMA_FROMDEVICE);
807 pkt = rxb_addr(rxb);
808
809 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
810 len += sizeof(u32); /* account for status word */
811 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
812
813 /* Reclaim a command buffer only if this packet is a response
814 * to a (driver-originated) command.
815 * If the packet (e.g. Rx frame) originated from uCode,
816 * there is no command buffer to reclaim.
817 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
818 * but apparently a few don't get set; catch them here. */
819 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
820 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
821 (pkt->hdr.cmd != REPLY_RX) &&
822 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
823 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
824 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
825 (pkt->hdr.cmd != REPLY_TX);
826
827 /* Based on type of command response or notification,
828 * handle those that need handling via function in
829 * rx_handlers table. See iwl4965_setup_rx_handlers() */
830 if (priv->rx_handlers[pkt->hdr.cmd]) {
831 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
832 i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
833 pkt->hdr.cmd);
834 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
835 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
836 } else {
837 /* No handling needed */
838 IWL_DEBUG_RX(priv,
839 "r %d i %d No handler needed for %s, 0x%02x\n",
840 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
841 pkt->hdr.cmd);
842 }
843
844 /*
845 * XXX: After here, we should always check rxb->page
846 * against NULL before touching it or its virtual
847 * memory (pkt). Because some rx_handler might have
848 * already taken or freed the pages.
849 */
850
851 if (reclaim) {
852 /* Invoke any callbacks, transfer the buffer to caller,
853 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
854 * as we reclaim the driver command queue */
855 if (rxb->page)
856 iwl_legacy_tx_cmd_complete(priv, rxb);
857 else
858 IWL_WARN(priv, "Claim null rxb?\n");
859 }
860
861 /* Reuse the page if possible. For notification packets and
862 * SKBs that fail to Rx correctly, add them back into the
863 * rx_free list for reuse later. */
864 spin_lock_irqsave(&rxq->lock, flags);
865 if (rxb->page != NULL) {
866 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
867 0, PAGE_SIZE << priv->hw_params.rx_page_order,
868 PCI_DMA_FROMDEVICE);
869 list_add_tail(&rxb->list, &rxq->rx_free);
870 rxq->free_count++;
871 } else
872 list_add_tail(&rxb->list, &rxq->rx_used);
873
874 spin_unlock_irqrestore(&rxq->lock, flags);
875
876 i = (i + 1) & RX_QUEUE_MASK;
877 /* If there are a lot of unused frames,
878 * restock the Rx queue so ucode wont assert. */
879 if (fill_rx) {
880 count++;
881 if (count >= 8) {
882 rxq->read = i;
883 iwl4965_rx_replenish_now(priv);
884 count = 0;
885 }
886 }
887 }
888
889 /* Backtrack one entry */
890 rxq->read = i;
891 if (fill_rx)
892 iwl4965_rx_replenish_now(priv);
893 else
894 iwl4965_rx_queue_restock(priv);
895}
896
897/* call this function to flush any scheduled tasklet */
898static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
899{
900 /* wait to make sure we flush pending tasklet*/
901 synchronize_irq(priv->pci_dev->irq);
902 tasklet_kill(&priv->irq_tasklet);
903}
904
/*
 * iwl4965_irq_tasklet - interrupt bottom half
 *
 * Reads and acks all pending causes from both interrupt sources
 * (CSR_INT and the flow-handler CSR_FH_INT_STATUS), then services each
 * discovered bit.  Runs in softirq context; priv->lock is held only
 * around the read/ack of the interrupt registers.
 */
static void iwl4965_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Interrupts stay disabled on purpose: the error handler
		 * restarts the device. */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				"the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		/* GP_CNTRL bit set means the switch allows the radio on. */
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl4965_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up_interruptible(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv,
			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1087
1088/*****************************************************************************
1089 *
1090 * sysfs attributes
1091 *
1092 *****************************************************************************/
1093
1094#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1095
1096/*
1097 * The following adds a new attribute to the sysfs representation
1098 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
1099 * used for controlling the debug level.
1100 *
1101 * See the level definitions in iwl for details.
1102 *
1103 * The debug_level being managed using sysfs below is a per device debug
1104 * level that is used instead of the global debug level if it (the per
1105 * device debug level) is set.
1106 */
1107static ssize_t iwl4965_show_debug_level(struct device *d,
1108 struct device_attribute *attr, char *buf)
1109{
1110 struct iwl_priv *priv = dev_get_drvdata(d);
1111 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
1112}
/*
 * sysfs "debug_level" write: parse a hex/decimal mask and install it as
 * this device's debug level (which overrides the module-wide level).
 */
static ssize_t iwl4965_store_debug_level(struct device *d,
				struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		/* NOTE(review): a parse failure is only logged; the write
		 * still reports success to userspace below -- confirm that
		 * this is intentional. */
		IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
	else {
		priv->debug_level = val;
		/* Traffic logging needs its buffers once debugging is on. */
		if (iwl_legacy_alloc_traffic_mem(priv))
			IWL_ERR(priv,
				"Not enough memory to generate traffic log\n");
	}
	/* Consume the whole input (bounded by count). */
	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
			iwl4965_show_debug_level, iwl4965_store_debug_level);
1135
1136
1137#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1138
1139
1140static ssize_t iwl4965_show_temperature(struct device *d,
1141 struct device_attribute *attr, char *buf)
1142{
1143 struct iwl_priv *priv = dev_get_drvdata(d);
1144
1145 if (!iwl_legacy_is_alive(priv))
1146 return -EAGAIN;
1147
1148 return sprintf(buf, "%d\n", priv->temperature);
1149}
1150
1151static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1152
1153static ssize_t iwl4965_show_tx_power(struct device *d,
1154 struct device_attribute *attr, char *buf)
1155{
1156 struct iwl_priv *priv = dev_get_drvdata(d);
1157
1158 if (!iwl_legacy_is_ready_rf(priv))
1159 return sprintf(buf, "off\n");
1160 else
1161 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1162}
1163
1164static ssize_t iwl4965_store_tx_power(struct device *d,
1165 struct device_attribute *attr,
1166 const char *buf, size_t count)
1167{
1168 struct iwl_priv *priv = dev_get_drvdata(d);
1169 unsigned long val;
1170 int ret;
1171
1172 ret = strict_strtoul(buf, 10, &val);
1173 if (ret)
1174 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1175 else {
1176 ret = iwl_legacy_set_tx_power(priv, val, false);
1177 if (ret)
1178 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1179 ret);
1180 else
1181 ret = count;
1182 }
1183 return ret;
1184}
1185
1186static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
1187 iwl4965_show_tx_power, iwl4965_store_tx_power);
1188
/* Attributes exported under the PCI device's sysfs directory. */
static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL	/* sysfs requires a NULL-terminated attribute list */
};

static struct attribute_group iwl_attribute_group = {
	.name = NULL, /* put in device directory */
	.attrs = iwl_sysfs_entries,
};
1202
1203/******************************************************************************
1204 *
1205 * uCode download functions
1206 *
1207 ******************************************************************************/
1208
1209static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
1210{
1211 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1212 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1213 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1214 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1215 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1216 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1217}
1218
/* Clear every bit in CSR_RESET so the NIC comes out of reset and runs. */
static void iwl4965_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1224
1225static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
1226 void *context);
1227static int iwl4965_mac_setup_register(struct iwl_priv *priv,
1228 u32 max_probe_length);
1229
1230static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1231{
1232 const char *name_pre = priv->cfg->fw_name_pre;
1233 char tag[8];
1234
1235 if (first) {
1236 priv->fw_index = priv->cfg->ucode_api_max;
1237 sprintf(tag, "%d", priv->fw_index);
1238 } else {
1239 priv->fw_index--;
1240 sprintf(tag, "%d", priv->fw_index);
1241 }
1242
1243 if (priv->fw_index < priv->cfg->ucode_api_min) {
1244 IWL_ERR(priv, "no suitable firmware found!\n");
1245 return -ENOENT;
1246 }
1247
1248 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1249
1250 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1251 priv->firmware_name);
1252
1253 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1254 &priv->pci_dev->dev, GFP_KERNEL, priv,
1255 iwl4965_ucode_callback);
1256}
1257
/*
 * Pointers to (and sizes of) the five sections of a raw uCode image:
 * runtime inst/data, init inst/data, and bootstrap code.  The pointers
 * reference the firmware blob itself; nothing is copied at parse time.
 */
struct iwl4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
1262
/*
 * iwl4965_load_firmware - parse the uCode image header
 *
 * Reads the v1-style 24-byte header, validates the file size against
 * the five section sizes it declares, and points @pieces at the
 * sections inside @ucode_raw->data.  Also caches the raw version word
 * in priv->ucode_ver.  Returns 0 or -EINVAL on a malformed image.
 */
static int iwl4965_load_firmware(struct iwl_priv *priv,
				const struct firmware *ucode_raw,
				struct iwl4965_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* All API versions handled here use the v1 header layout. */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size =
				le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	/* NOTE(review): the summed u32 section sizes could wrap on a
	 * 32-bit size_t for a crafted image -- confirm firmware source
	 * is trusted before relying on this equality check alone. */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
				pieces->data_size + pieces->init_size +
				pieces->init_data_size + pieces->boot_size) {

		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	/* Sections are packed back-to-back immediately after the header. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
1318
1319/**
1320 * iwl4965_ucode_callback - callback when firmware was loaded
1321 *
1322 * If loaded successfully, copies the firmware into buffers
1323 * for the card to fetch (via DMA).
1324 */
1325static void
1326iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
1327{
1328 struct iwl_priv *priv = context;
1329 struct iwl_ucode_header *ucode;
1330 int err;
1331 struct iwl4965_firmware_pieces pieces;
1332 const unsigned int api_max = priv->cfg->ucode_api_max;
1333 const unsigned int api_min = priv->cfg->ucode_api_min;
1334 u32 api_ver;
1335
1336 u32 max_probe_length = 200;
1337 u32 standard_phy_calibration_size =
1338 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1339
1340 memset(&pieces, 0, sizeof(pieces));
1341
1342 if (!ucode_raw) {
1343 if (priv->fw_index <= priv->cfg->ucode_api_max)
1344 IWL_ERR(priv,
1345 "request for firmware file '%s' failed.\n",
1346 priv->firmware_name);
1347 goto try_again;
1348 }
1349
1350 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1351 priv->firmware_name, ucode_raw->size);
1352
1353 /* Make sure that we got at least the API version number */
1354 if (ucode_raw->size < 4) {
1355 IWL_ERR(priv, "File size way too small!\n");
1356 goto try_again;
1357 }
1358
1359 /* Data from ucode file: header followed by uCode images */
1360 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1361
1362 err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
1363
1364 if (err)
1365 goto try_again;
1366
1367 api_ver = IWL_UCODE_API(priv->ucode_ver);
1368
1369 /*
1370 * api_ver should match the api version forming part of the
1371 * firmware filename ... but we don't check for that and only rely
1372 * on the API version read from firmware header from here on forward
1373 */
1374 if (api_ver < api_min || api_ver > api_max) {
1375 IWL_ERR(priv,
1376 "Driver unable to support your firmware API. "
1377 "Driver supports v%u, firmware is v%u.\n",
1378 api_max, api_ver);
1379 goto try_again;
1380 }
1381
1382 if (api_ver != api_max)
1383 IWL_ERR(priv,
1384 "Firmware has old API version. Expected v%u, "
1385 "got v%u. New firmware can be obtained "
1386 "from http://www.intellinuxwireless.org.\n",
1387 api_max, api_ver);
1388
1389 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1390 IWL_UCODE_MAJOR(priv->ucode_ver),
1391 IWL_UCODE_MINOR(priv->ucode_ver),
1392 IWL_UCODE_API(priv->ucode_ver),
1393 IWL_UCODE_SERIAL(priv->ucode_ver));
1394
1395 snprintf(priv->hw->wiphy->fw_version,
1396 sizeof(priv->hw->wiphy->fw_version),
1397 "%u.%u.%u.%u",
1398 IWL_UCODE_MAJOR(priv->ucode_ver),
1399 IWL_UCODE_MINOR(priv->ucode_ver),
1400 IWL_UCODE_API(priv->ucode_ver),
1401 IWL_UCODE_SERIAL(priv->ucode_ver));
1402
1403 /*
1404 * For any of the failures below (before allocating pci memory)
1405 * we will try to load a version with a smaller API -- maybe the
1406 * user just got a corrupted version of the latest API.
1407 */
1408
1409 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1410 priv->ucode_ver);
1411 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1412 pieces.inst_size);
1413 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1414 pieces.data_size);
1415 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1416 pieces.init_size);
1417 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1418 pieces.init_data_size);
1419 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1420 pieces.boot_size);
1421
1422 /* Verify that uCode images will fit in card's SRAM */
1423 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1424 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1425 pieces.inst_size);
1426 goto try_again;
1427 }
1428
1429 if (pieces.data_size > priv->hw_params.max_data_size) {
1430 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1431 pieces.data_size);
1432 goto try_again;
1433 }
1434
1435 if (pieces.init_size > priv->hw_params.max_inst_size) {
1436 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1437 pieces.init_size);
1438 goto try_again;
1439 }
1440
1441 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1442 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1443 pieces.init_data_size);
1444 goto try_again;
1445 }
1446
1447 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1448 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1449 pieces.boot_size);
1450 goto try_again;
1451 }
1452
1453 /* Allocate ucode buffers for card's bus-master loading ... */
1454
1455 /* Runtime instructions and 2 copies of data:
1456 * 1) unmodified from disk
1457 * 2) backup cache for save/restore during power-downs */
1458 priv->ucode_code.len = pieces.inst_size;
1459 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1460
1461 priv->ucode_data.len = pieces.data_size;
1462 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1463
1464 priv->ucode_data_backup.len = pieces.data_size;
1465 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1466
1467 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1468 !priv->ucode_data_backup.v_addr)
1469 goto err_pci_alloc;
1470
1471 /* Initialization instructions and data */
1472 if (pieces.init_size && pieces.init_data_size) {
1473 priv->ucode_init.len = pieces.init_size;
1474 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1475
1476 priv->ucode_init_data.len = pieces.init_data_size;
1477 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1478
1479 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1480 goto err_pci_alloc;
1481 }
1482
1483 /* Bootstrap (instructions only, no data) */
1484 if (pieces.boot_size) {
1485 priv->ucode_boot.len = pieces.boot_size;
1486 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1487
1488 if (!priv->ucode_boot.v_addr)
1489 goto err_pci_alloc;
1490 }
1491
1492 /* Now that we can no longer fail, copy information */
1493
1494 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1495
1496 /* Copy images into buffers for card's bus-master reads ... */
1497
1498 /* Runtime instructions (first block of data in file) */
1499 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1500 pieces.inst_size);
1501 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1502
1503 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1504 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1505
1506 /*
1507 * Runtime data
1508 * NOTE: Copy into backup buffer will be done in iwl_up()
1509 */
1510 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1511 pieces.data_size);
1512 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1513 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1514
1515 /* Initialization instructions */
1516 if (pieces.init_size) {
1517 IWL_DEBUG_INFO(priv,
1518 "Copying (but not loading) init instr len %Zd\n",
1519 pieces.init_size);
1520 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1521 }
1522
1523 /* Initialization data */
1524 if (pieces.init_data_size) {
1525 IWL_DEBUG_INFO(priv,
1526 "Copying (but not loading) init data len %Zd\n",
1527 pieces.init_data_size);
1528 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1529 pieces.init_data_size);
1530 }
1531
1532 /* Bootstrap instructions */
1533 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1534 pieces.boot_size);
1535 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1536
1537 /*
1538 * figure out the offset of chain noise reset and gain commands
1539 * base on the size of standard phy calibration commands table size
1540 */
1541 priv->_4965.phy_calib_chain_noise_reset_cmd =
1542 standard_phy_calibration_size;
1543 priv->_4965.phy_calib_chain_noise_gain_cmd =
1544 standard_phy_calibration_size + 1;
1545
1546 /**************************************************
1547 * This is still part of probe() in a sense...
1548 *
1549 * 9. Setup and register with mac80211 and debugfs
1550 **************************************************/
1551 err = iwl4965_mac_setup_register(priv, max_probe_length);
1552 if (err)
1553 goto out_unbind;
1554
1555 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
1556 if (err)
1557 IWL_ERR(priv,
1558 "failed to create debugfs files. Ignoring error: %d\n", err);
1559
1560 err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1561 &iwl_attribute_group);
1562 if (err) {
1563 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1564 goto out_unbind;
1565 }
1566
1567 /* We have our copies now, allow OS release its copies */
1568 release_firmware(ucode_raw);
1569 complete(&priv->_4965.firmware_loading_complete);
1570 return;
1571
1572 try_again:
1573 /* try next, if any */
1574 if (iwl4965_request_firmware(priv, false))
1575 goto out_unbind;
1576 release_firmware(ucode_raw);
1577 return;
1578
1579 err_pci_alloc:
1580 IWL_ERR(priv, "failed to allocate pci memory\n");
1581 iwl4965_dealloc_ucode_pci(priv);
1582 out_unbind:
1583 complete(&priv->_4965.firmware_loading_complete);
1584 device_release_driver(&priv->pci_dev->dev);
1585 release_firmware(ucode_raw);
1586}
1587
/*
 * Human-readable names for uCode error codes, indexed directly by the
 * error number reported in the device's error event table.
 */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	/* The comma after the next entry was missing, so the two adjacent
	 * string literals were silently concatenated into one entry
	 * ("NMI_INTERRUPT_BREAK_POINTDEBUG_0"), shifting every later
	 * entry down by one and shrinking the table by one slot. */
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
1618
1619static struct { char *name; u8 num; } advanced_lookup[] = {
1620 { "NMI_INTERRUPT_WDG", 0x34 },
1621 { "SYSASSERT", 0x35 },
1622 { "UCODE_VERSION_MISMATCH", 0x37 },
1623 { "BAD_COMMAND", 0x38 },
1624 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1625 { "FATAL_ERROR", 0x3D },
1626 { "NMI_TRM_HW_ERR", 0x46 },
1627 { "NMI_INTERRUPT_TRM", 0x4C },
1628 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1629 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1630 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1631 { "NMI_INTERRUPT_HOST", 0x66 },
1632 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1633 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1634 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1635 { "ADVANCED_SYSASSERT", 0 },
1636};
1637
1638static const char *iwl4965_desc_lookup(u32 num)
1639{
1640 int i;
1641 int max = ARRAY_SIZE(desc_lookup_text);
1642
1643 if (num < max)
1644 return desc_lookup_text[num];
1645
1646 max = ARRAY_SIZE(advanced_lookup) - 1;
1647 for (i = 0; i < max; i++) {
1648 if (advanced_lookup[i].num == num)
1649 break;
1650 }
1651 return advanced_lookup[i].name;
1652}
1653
/* Layout constants for the on-device error event table. */
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/*
 * iwl4965_dump_nic_error_log - dump the uCode error table to the log
 *
 * Locates the error event table in device SRAM (init or runtime image,
 * depending on which uCode is loaded) and prints its fixed-position
 * fields: error id, program counter, branch/interrupt link registers,
 * source line, timestamp and last host command.
 */
void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* Each uCode image reports its own error-table address. */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word of the table is the number of recorded entries. */
	count = iwl_legacy_read_targ_mem(priv, base);

	/* NOTE(review): comparing a byte offset to count * elem-size is an
	 * odd "log non-empty" test -- confirm the intended condition. */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	/* Fixed word offsets within the table, per the 4965 uCode layout. */
	desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
	priv->isr_stats.err_code = desc;
	pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
	blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
	blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
	ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
	ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
	data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
	data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
	line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
	time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
	hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));

	trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
					time, data1, data2, line,
					blink1, blink2, ilink1, ilink2);

	IWL_ERR(priv, "Desc Time "
		"data1 data2 line\n");
	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
		iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
	IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
		pc, blink1, blink2, ilink1, ilink2, hcmd);
}
1710
/* Byte offset of the first event record past the 4-word log header. */
#define EVENT_START_OFFSET  (4 * sizeof(u32))

/**
 * iwl4965_print_event_log - Dump error event log to syslog
 *
 * Reads @num_events records starting at @start_idx from the uCode event
 * log in device SRAM.  @mode selects the record layout: 2 u32s per
 * entry (no timestamp) or 3 u32s (timestamped).  When @bufsz is
 * non-zero the text is appended into *@buf at @pos instead of being
 * printed to the kernel log.  Returns the updated @pos.
 */
static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
				u32 num_events, u32 mode,
				int pos, char **buf, size_t bufsz)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	if (num_events == 0)
		return pos;

	/* Each uCode image reports its own event-log address. */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	}

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);

	/* Set starting address; reads will auto-increment */
	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			/* data, ev */
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOG:0x%08x:%04u\n",
						time, ev);
			} else {
				trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
					time, ev);
				IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
					time, ev);
			}
		} else {
			data = _iwl_legacy_read_direct32(priv,
						HBUS_TARG_MEM_RDAT);
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOGT:%010u:0x%08x:%04u\n",
						time, data, ev);
			} else {
				IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
					time, data, ev);
				trace_iwlwifi_legacy_dev_ucode_event(priv, time,
					data, ev);
			}
		}
	}

	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return pos;
}
1790
1791/**
1792 * iwl4965_print_last_event_logs - Dump the newest # of event log to syslog
1793 */
1794static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1795 u32 num_wraps, u32 next_entry,
1796 u32 size, u32 mode,
1797 int pos, char **buf, size_t bufsz)
1798{
1799 /*
1800 * display the newest DEFAULT_LOG_ENTRIES entries
1801 * i.e the entries just before the next ont that uCode would fill.
1802 */
1803 if (num_wraps) {
1804 if (next_entry < size) {
1805 pos = iwl4965_print_event_log(priv,
1806 capacity - (size - next_entry),
1807 size - next_entry, mode,
1808 pos, buf, bufsz);
1809 pos = iwl4965_print_event_log(priv, 0,
1810 next_entry, mode,
1811 pos, buf, bufsz);
1812 } else
1813 pos = iwl4965_print_event_log(priv, next_entry - size,
1814 size, mode, pos, buf, bufsz);
1815 } else {
1816 if (next_entry < size) {
1817 pos = iwl4965_print_event_log(priv, 0, next_entry,
1818 mode, pos, buf, bufsz);
1819 } else {
1820 pos = iwl4965_print_event_log(priv, next_entry - size,
1821 size, mode, pos, buf, bufsz);
1822 }
1823 }
1824 return pos;
1825}
1826
/* How many of the newest entries to show when not dumping the full log. */
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)

/*
 * iwl4965_dump_nic_event_log - dump the uCode event log
 *
 * Reads the 4-word log header from SRAM (capacity, timestamp mode,
 * wrap count, next write index) and prints either the full log or just
 * the newest entries.  With @display (debug builds) the text is
 * written into a freshly kmalloc'd *@buf, which the caller must free.
 * Returns the number of characters produced, or a negative errno.
 */
int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
				char **buf, bool display)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	int pos = 0;
	size_t bufsz = 0;

	/* Each uCode image reports its own event-log address. */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_legacy_read_targ_mem(priv, base);
	mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
	num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
	next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));

	/* Once wrapped, every slot holds a valid entry. */
	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		/* ~48 chars per formatted entry; caller frees *buf. */
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl4965_print_event_log(priv, next_entry,
					capacity - next_entry, mode,
					pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl4965_print_event_log(priv, 0,
					next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#else
	pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}
1913
/*
 * iwl4965_rf_kill_ct_config - configure critical-temperature shutdown
 *
 * Clears the CT-kill exit bit, then sends REPLY_CT_KILL_CONFIG_CMD
 * with the threshold from hw_params so the uCode can shut the radio
 * down before the chip overheats.
 */
static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* NOTE(review): cmd is not zeroed before sending sizeof(cmd)
	 * bytes; assumes critical_temperature_R is its only field --
	 * confirm against struct iwl_ct_kill_config. */
	cmd.critical_temperature_R =
		cpu_to_le32(priv->hw_params.ct_kill_threshold);

	ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
			       sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
	else
		IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
				"succeeded, "
				"critical temperature is %d\n",
				priv->hw_params.ct_kill_threshold);
}
1938
/*
 * Default Tx queue -> DMA/FIFO channel mapping for the 4965.
 * Queues 0-3 carry the four EDCA access categories (voice, video,
 * best effort, background), queue 4 is the host command FIFO, and
 * the last two queues are left unused by default.
 */
static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
1948
/*
 * iwl4965_alive_notify - set up the Tx scheduler once runtime uCode is alive
 *
 * Zeroes the scheduler context area in device SRAM, points the scheduler
 * at the DRAM byte-count tables, enables the Tx DMA channels, then
 * initializes and activates every Tx queue (including the command queue),
 * mapping each queue to its FIFO per default_queue_to_tx_fifo.  All device
 * accesses are performed under priv->lock with interrupts disabled.
 *
 * Always returns 0.
 */
static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_legacy_read_prph(priv,
					IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	/* Zero context data, Tx status bitmap, and translate table regions,
	 * one 32-bit word at a time */
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
			priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* Interrupt on every scheduler queue */
	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
				 (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		/* unused queues get activated but no FIFO status */
		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
2045
2046/**
2047 * iwl4965_alive_start - called after REPLY_ALIVE notification received
2048 * from protocol/runtime uCode (initialization uCode's
2049 * Alive gets handled by iwl_init_alive_start()).
2050 */
2051static void iwl4965_alive_start(struct iwl_priv *priv)
2052{
2053 int ret = 0;
2054 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2055
2056 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2057
2058 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2059 /* We had an error bringing up the hardware, so take it
2060 * all the way back down so we can try again */
2061 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2062 goto restart;
2063 }
2064
2065 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2066 * This is a paranoid check, because we would not have gotten the
2067 * "runtime" alive if code weren't properly loaded. */
2068 if (iwl4965_verify_ucode(priv)) {
2069 /* Runtime instruction load was bad;
2070 * take it all the way back down so we can try again */
2071 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2072 goto restart;
2073 }
2074
2075 ret = iwl4965_alive_notify(priv);
2076 if (ret) {
2077 IWL_WARN(priv,
2078 "Could not complete ALIVE transition [ntf]: %d\n", ret);
2079 goto restart;
2080 }
2081
2082
2083 /* After the ALIVE response, we can send host commands to the uCode */
2084 set_bit(STATUS_ALIVE, &priv->status);
2085
2086 /* Enable watchdog to monitor the driver tx queues */
2087 iwl_legacy_setup_watchdog(priv);
2088
2089 if (iwl_legacy_is_rfkill(priv))
2090 return;
2091
2092 ieee80211_wake_queues(priv->hw);
2093
2094 priv->active_rate = IWL_RATES_MASK;
2095
2096 if (iwl_legacy_is_associated_ctx(ctx)) {
2097 struct iwl_legacy_rxon_cmd *active_rxon =
2098 (struct iwl_legacy_rxon_cmd *)&ctx->active;
2099 /* apply any changes in staging */
2100 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2101 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2102 } else {
2103 struct iwl_rxon_context *tmp;
2104 /* Initialize our rx_config data */
2105 for_each_context(priv, tmp)
2106 iwl_legacy_connection_init_rx_config(priv, tmp);
2107
2108 if (priv->cfg->ops->hcmd->set_rxon_chain)
2109 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2110 }
2111
2112 /* Configure bluetooth coexistence if enabled */
2113 iwl_legacy_send_bt_config(priv);
2114
2115 iwl4965_reset_run_time_calib(priv);
2116
2117 set_bit(STATUS_READY, &priv->status);
2118
2119 /* Configure the adapter for unassociated operation */
2120 iwl_legacy_commit_rxon(priv, ctx);
2121
2122 /* At this point, the NIC is initialized and operational */
2123 iwl4965_rf_kill_ct_config(priv);
2124
2125 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2126 wake_up_interruptible(&priv->wait_command_queue);
2127
2128 iwl_legacy_power_update_mode(priv, true);
2129 IWL_DEBUG_INFO(priv, "Updated power mode\n");
2130
2131 return;
2132
2133 restart:
2134 queue_work(priv->workqueue, &priv->restart);
2135}
2136
2137static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
2138
/*
 * __iwl4965_down - take the NIC all the way down
 *
 * Cancels scans, stops the watchdog, clears station tables, disables
 * interrupts and stops the Tx/Rx machinery, then powers the device down
 * via iwl_legacy_apm_stop().  priv->status is rewritten to preserve only
 * the bits that must survive a shutdown (RF-kill, geo config, exit
 * pending, and FW error if the device was initialized).
 *
 * Caller must hold priv->mutex (see iwl4965_down / iwl4965_bg_restart).
 */
static void __iwl4965_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* Remember whether we were already exiting so we only clear the
	 * bit below if we set it ourselves */
	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_interruptible_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl4965_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!iwl_legacy_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl4965_txq_ctx_stop(priv);
	iwl4965_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	/* dev_kfree_skb(NULL) is a no-op, so this is safe unconditionally */
	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl4965_clear_free_frames(priv);
}
2224
/*
 * iwl4965_down - serialized wrapper for __iwl4965_down()
 *
 * Performs the shutdown under priv->mutex, then cancels all deferred
 * work outside the lock (the work handlers take the mutex themselves).
 */
static void iwl4965_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl4965_down(priv);
	mutex_unlock(&priv->mutex);

	iwl4965_cancel_deferred_work(priv);
}
2233
2234#define HW_READY_TIMEOUT (50)
2235
2236static int iwl4965_set_hw_ready(struct iwl_priv *priv)
2237{
2238 int ret = 0;
2239
2240 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2241 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2242
2243 /* See if we got it */
2244 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2245 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2246 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2247 HW_READY_TIMEOUT);
2248 if (ret != -ETIMEDOUT)
2249 priv->hw_ready = true;
2250 else
2251 priv->hw_ready = false;
2252
2253 IWL_DEBUG_INFO(priv, "hardware %s\n",
2254 (priv->hw_ready == 1) ? "ready" : "not ready");
2255 return ret;
2256}
2257
2258static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
2259{
2260 int ret = 0;
2261
2262 IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
2263
2264 ret = iwl4965_set_hw_ready(priv);
2265 if (priv->hw_ready)
2266 return ret;
2267
2268 /* If HW is not ready, prepare the conditions to check again */
2269 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2270 CSR_HW_IF_CONFIG_REG_PREPARE);
2271
2272 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2273 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
2274 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
2275
2276 /* HW should be ready by now, check again. */
2277 if (ret != -ETIMEDOUT)
2278 iwl4965_set_hw_ready(priv);
2279
2280 return ret;
2281}
2282
#define MAX_HW_RESTARTS 5

/*
 * __iwl4965_up - bring the NIC up and start loading uCode
 *
 * Allocates broadcast stations, verifies HW readiness and RF-kill state,
 * initializes the NIC, clears the rfkill handshake bits, then tries up to
 * MAX_HW_RESTARTS times to load the bootstrap uCode and start the card.
 * Returns 0 on success (also when held in RF-kill), negative errno
 * otherwise.  Caller must hold priv->mutex.
 */
static int __iwl4965_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int i;
	int ret;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bringup\n");
		return -EIO;
	}

	/* A broadcast station is needed per RXON context; undo all
	 * allocations if any one of them fails */
	for_each_context(priv, ctx) {
		ret = iwl4965_alloc_bcast_station(priv, ctx);
		if (ret) {
			iwl_legacy_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	iwl4965_prepare_card_hw(priv);

	if (!priv->hw_ready) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv,
		CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	/* Radio is disabled: report it and succeed with interrupts on so
	 * we can notice the switch being released */
	if (iwl_legacy_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);

		iwl_legacy_enable_interrupts(priv);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack/clear any pending interrupts before init */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before iwl_hw_nic_init */
	priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

	ret = iwl4965_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = priv->cfg->ops->lib->load_ucode(priv);

		if (ret) {
			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
				ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl4965_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: tear the device back down */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl4965_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2391
2392
2393/*****************************************************************************
2394 *
2395 * Workqueue callbacks
2396 *
2397 *****************************************************************************/
2398
2399static void iwl4965_bg_init_alive_start(struct work_struct *data)
2400{
2401 struct iwl_priv *priv =
2402 container_of(data, struct iwl_priv, init_alive_start.work);
2403
2404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2405 return;
2406
2407 mutex_lock(&priv->mutex);
2408 priv->cfg->ops->lib->init_alive_start(priv);
2409 mutex_unlock(&priv->mutex);
2410}
2411
2412static void iwl4965_bg_alive_start(struct work_struct *data)
2413{
2414 struct iwl_priv *priv =
2415 container_of(data, struct iwl_priv, alive_start.work);
2416
2417 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2418 return;
2419
2420 mutex_lock(&priv->mutex);
2421 iwl4965_alive_start(priv);
2422 mutex_unlock(&priv->mutex);
2423}
2424
2425static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2426{
2427 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2428 run_time_calib_work);
2429
2430 mutex_lock(&priv->mutex);
2431
2432 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2433 test_bit(STATUS_SCANNING, &priv->status)) {
2434 mutex_unlock(&priv->mutex);
2435 return;
2436 }
2437
2438 if (priv->start_calib) {
2439 iwl4965_chain_noise_calibration(priv,
2440 (void *)&priv->_4965.statistics);
2441 iwl4965_sensitivity_calibration(priv,
2442 (void *)&priv->_4965.statistics);
2443 }
2444
2445 mutex_unlock(&priv->mutex);
2446}
2447
/*
 * iwl4965_bg_restart - restart the device after a firmware error or an
 * explicit restart request
 *
 * On a firmware error (STATUS_FW_ERROR set): detach all vifs, take the
 * device down and let mac80211 drive the recovery via
 * ieee80211_restart_hw().  Otherwise: full down/up cycle driven by the
 * driver itself.
 */
static void iwl4965_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;

		mutex_lock(&priv->mutex);
		/* Forget all interfaces; mac80211 will re-add them */
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;

		__iwl4965_down(priv);

		mutex_unlock(&priv->mutex);
		/* cancel work outside the mutex - handlers take it */
		iwl4965_cancel_deferred_work(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl4965_down(priv);

		/* down may have raced with module unload */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			return;

		mutex_lock(&priv->mutex);
		__iwl4965_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2479
2480static void iwl4965_bg_rx_replenish(struct work_struct *data)
2481{
2482 struct iwl_priv *priv =
2483 container_of(data, struct iwl_priv, rx_replenish);
2484
2485 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2486 return;
2487
2488 mutex_lock(&priv->mutex);
2489 iwl4965_rx_replenish(priv);
2490 mutex_unlock(&priv->mutex);
2491}
2492
2493/*****************************************************************************
2494 *
2495 * mac80211 entry point functions
2496 *
2497 *****************************************************************************/
2498
2499#define UCODE_READY_TIMEOUT (4 * HZ)
2500
2501/*
2502 * Not a mac80211 entry point function, but it fits in with all the
2503 * other mac80211 functions grouped here.
2504 */
/*
 * iwl4965_mac_setup_register - describe our capabilities to mac80211 and
 * register the hw
 *
 * @max_probe_length: maximum probe request length supported by the uCode;
 *	used to derive wiphy->max_scan_ie_len.
 *
 * Fills in hw->flags, wiphy flags, interface modes, queue count and band
 * data, initializes LEDs, then calls ieee80211_register_hw().  On success
 * sets priv->mac80211_registered and returns 0; otherwise returns the
 * registration error.
 */
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
				  u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS is only meaningful on 11n-capable SKUs */
	if (priv->cfg->sku & IWL_SKU_N)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* Advertise the union of all contexts' interface modes */
	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* Only advertise bands that actually have channels */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
2569
2570
/*
 * iwl4965_mac_start - mac80211 start() callback
 *
 * Brings the NIC up under priv->mutex, then (unless RF-kill is asserted)
 * waits up to UCODE_READY_TIMEOUT for the runtime uCode to reach
 * STATUS_READY (signalled from iwl4965_alive_start() via
 * wait_command_queue) before enabling the LED and marking the interface
 * open.  Returns 0 on success, -ETIMEDOUT if the uCode never came alive,
 * or the error from __iwl4965_up().
 */
int iwl4965_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl4965_up(priv);
	mutex_unlock(&priv->mutex);

	if (ret)
		return ret;

	/* __iwl4965_up() returns 0 when held in RF-kill; nothing to wait
	 * for in that case */
	if (iwl_legacy_is_rfkill(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	iwl4965_led_enable(priv);

out:
	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
2611
/*
 * iwl4965_mac_stop - mac80211 stop() callback
 *
 * Takes the device down and flushes the driver workqueue, then re-enables
 * interrupts so RF-kill switch changes are still delivered while the
 * interface is closed.  No-op if the interface was never opened.
 */
void iwl4965_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	iwl4965_down(priv);

	flush_workqueue(priv->workqueue);

	/* enable interrupts again in order to receive rfkill changes */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2633
/*
 * iwl4965_mac_tx - mac80211 tx() callback
 *
 * Hands the frame to iwl4965_tx_skb(); if that fails, the skb is freed
 * here, so ownership of the buffer never returns to mac80211.
 */
void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	/* non-zero means the skb was not queued - drop it */
	if (iwl4965_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
}
2648
/*
 * iwl4965_mac_update_tkip_key - mac80211 update_tkip_key() callback
 *
 * Pushes the refreshed TKIP phase-1 key (derived by mac80211 for the new
 * IV32) down to the uCode for the station's hardware key slot.
 */
void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_key_conf *keyconf,
				struct ieee80211_sta *sta,
				u32 iv32, u16 *phase1key)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
				iv32, phase1key);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2665
/*
 * iwl4965_mac_set_key - mac80211 set_key() callback
 *
 * Installs or removes a hardware crypto key for the given station (or the
 * context's broadcast station when @sta is NULL).  Group WEP keys without
 * a station are treated as "default WEP keys" and use a dedicated host
 * command path; everything else goes through the dynamic key API.
 *
 * Returns -EOPNOTSUPP when the module runs with software crypto,
 * -EINVAL for an unknown station or command, else the result of the key
 * operation.
 */
int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *ctx = vif_priv->ctx;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (priv->cfg->mod_params->sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
	if (sta_id == IWL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&priv->mutex);
	/* key changes must not race with an ongoing scan */
	iwl_legacy_scan_cancel_timeout(priv, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	     !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
					(key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = iwl4965_set_default_wep_key(priv,
							vif_priv->ctx, key);
		else
			ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
						  key, sta_id);

		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = iwl4965_remove_default_wep_key(priv, ctx, key);
		else
			ret = iwl4965_remove_dynamic_key(priv, ctx,
							key, sta_id);

		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
2736
/*
 * iwl4965_mac_ampdu_action - mac80211 ampdu_action() callback
 *
 * Starts/stops Rx and Tx aggregation sessions for (sta, tid).  Tx session
 * start/stop also maintains the _4965.agg_tids_count bookkeeping.  Stop
 * errors are suppressed (forced to 0) when the driver is already exiting,
 * since mac80211 must be allowed to tear the session down.
 *
 * Returns -EACCES on non-11n SKUs, -EINVAL for unhandled actions, else
 * the result of the aggregation operation.
 */
int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    enum ieee80211_ampdu_mlme_action action,
			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			    u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	/* aggregation requires 11n hardware */
	if (!(priv->cfg->sku & IWL_SKU_N))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
		if (ret == 0) {
			priv->_4965.agg_tids_count++;
			IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
				priv->_4965.agg_tids_count);
		}
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
		if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
			priv->_4965.agg_tids_count--;
			IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
				priv->_4965.agg_tids_count);
		}
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
2793
/*
 * iwl4965_mac_sta_add - mac80211 sta_add() callback
 *
 * Adds the station to the uCode's station table and initializes rate
 * scaling for it.  Returns 0 on success or the error from the add-station
 * host command.
 */
int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* NOTE(review): despite the name, this is true when *we* are a
	 * station - the peer being added is then our AP; confirm against
	 * iwl_legacy_add_station_common()'s is_ap semantics */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	/* invalidate until the add actually succeeds */
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl4965_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
2834
/*
 * iwl4965_mac_channel_switch - mac80211 channel_switch() callback
 *
 * Validates the target channel, updates the staging RXON (channel, HT40
 * configuration, band flags) under priv->lock, and hands the switch to
 * the device-specific set_channel_switch hook.  If the switch is not (or
 * no longer) in progress on exit, mac80211 is told the switch completed
 * via ieee80211_chswitch_done().
 *
 * Bails out early while in RF-kill, exiting, scanning, not associated,
 * or when a previous switch is still in progress.
 */
void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
			       struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;
	unsigned long flags = 0;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (iwl_legacy_is_rfkill(priv))
		goto out_exit;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		goto out_exit;

	if (!iwl_legacy_is_associated_ctx(ctx))
		goto out_exit;

	/* channel switch in progress */
	if (priv->switch_rxon.switch_in_progress == true)
		goto out_exit;

	mutex_lock(&priv->mutex);
	if (priv->cfg->ops->lib->set_channel_switch) {

		ch = channel->hw_value;
		/* nothing to do if we are already on the target channel */
		if (le16_to_cpu(ctx->active.channel) != ch) {
			ch_info = iwl_legacy_get_channel_info(priv,
						       channel->band,
						       ch);
			if (!iwl_legacy_is_channel_valid(ch_info)) {
				IWL_DEBUG_MAC80211(priv, "invalid channel\n");
				goto out;
			}
			spin_lock_irqsave(&priv->lock, flags);

			priv->current_ht_config.smps = conf->smps_mode;

			/* Configure HT40 channels */
			ctx->ht.enabled = conf_is_ht(conf);
			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/* a channel change invalidates the staging flags */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);
			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
			spin_unlock_irqrestore(&priv->lock, flags);

			iwl_legacy_set_rate(priv);
			/*
			 * at this point, staging_rxon has the
			 * configuration for channel switch
			 */
			if (priv->cfg->ops->lib->set_channel_switch(priv,
								    ch_switch))
				priv->switch_rxon.switch_in_progress = false;
		}
	}
out:
	mutex_unlock(&priv->mutex);
out_exit:
	if (!priv->switch_rxon.switch_in_progress)
		ieee80211_chswitch_done(ctx->vif, false);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2925
/*
 * iwl4965_configure_filter - mac80211 configure_filter() callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter flag bits and
 * applies them to the staging RXON of every context.  The change is not
 * committed to the device here; it takes effect with the next RXON
 * commit.  On return, *total_flags is clamped to the set of filters this
 * driver can honor.
 */
void iwl4965_configure_filter(struct ieee80211_hw *hw,
			     unsigned int changed_flags,
			     unsigned int *total_flags,
			     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

/* Collect RXON bits to set (filter_or) or clear (filter_nand) based on
 * whether the corresponding FIF_* flag is requested */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
2975
2976/*****************************************************************************
2977 *
2978 * driver setup and teardown
2979 *
2980 *****************************************************************************/
2981
2982static void iwl4965_bg_txpower_work(struct work_struct *work)
2983{
2984 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2985 txpower_work);
2986
2987 /* If a scan happened to start before we got here
2988 * then just return; the statistics notification will
2989 * kick off another scheduled work to compensate for
2990 * any temperature delta we missed here. */
2991 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2992 test_bit(STATUS_SCANNING, &priv->status))
2993 return;
2994
2995 mutex_lock(&priv->mutex);
2996
2997 /* Regardless of if we are associated, we must reconfigure the
2998 * TX power since frames can be sent on non-radar channels while
2999 * not associated */
3000 priv->cfg->ops->lib->send_tx_power(priv);
3001
3002 /* Update last_temperature to keep is_calib_needed from running
3003 * when it isn't needed... */
3004 priv->last_temperature = priv->temperature;
3005
3006 mutex_unlock(&priv->mutex);
3007}
3008
3009static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
3010{
3011 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3012
3013 init_waitqueue_head(&priv->wait_command_queue);
3014
3015 INIT_WORK(&priv->restart, iwl4965_bg_restart);
3016 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
3017 INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
3018 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
3019 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
3020
3021 iwl_legacy_setup_scan_deferred_work(priv);
3022
3023 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
3024
3025 init_timer(&priv->statistics_periodic);
3026 priv->statistics_periodic.data = (unsigned long)priv;
3027 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
3028
3029 init_timer(&priv->ucode_trace);
3030 priv->ucode_trace.data = (unsigned long)priv;
3031 priv->ucode_trace.function = iwl4965_bg_ucode_trace;
3032
3033 init_timer(&priv->watchdog);
3034 priv->watchdog.data = (unsigned long)priv;
3035 priv->watchdog.function = iwl_legacy_bg_watchdog;
3036
3037 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3038 iwl4965_irq_tasklet, (unsigned long)priv);
3039}
3040
/*
 * Stop the deferred work and timers started by
 * iwl4965_setup_deferred_work().
 */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->txpower_work);
	cancel_delayed_work_sync(&priv->init_alive_start);
	/* alive_start is cancelled without _sync (may still be running) */
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->run_time_calib_work);

	iwl_legacy_cancel_scan_deferred_work(priv);

	del_timer_sync(&priv->statistics_periodic);
	del_timer_sync(&priv->ucode_trace);
	/* NOTE(review): priv->watchdog is not stopped here -- presumably
	 * handled on the device-down path; verify against callers. */
}
3053
3054static void iwl4965_init_hw_rates(struct iwl_priv *priv,
3055 struct ieee80211_rate *rates)
3056{
3057 int i;
3058
3059 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3060 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
3061 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3062 rates[i].hw_value_short = i;
3063 rates[i].flags = 0;
3064 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3065 /*
3066 * If CCK != 1M then set short preamble rate flag.
3067 */
3068 rates[i].flags |=
3069 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3070 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3071 }
3072 }
3073}
3074/*
3075 * Acquire priv->lock before calling this function !
3076 */
3077void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
3078{
3079 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
3080 (index & 0xff) | (txq_id << 8));
3081 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
3082}
3083
3084void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
3085 struct iwl_tx_queue *txq,
3086 int tx_fifo_id, int scd_retry)
3087{
3088 int txq_id = txq->q.id;
3089
3090 /* Find out whether to activate Tx queue */
3091 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
3092
3093 /* Set up and activate */
3094 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
3095 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
3096 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
3097 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
3098 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
3099 IWL49_SCD_QUEUE_STTS_REG_MSK);
3100
3101 txq->sched_retry = scd_retry;
3102
3103 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
3104 active ? "Activate" : "Deactivate",
3105 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
3106}
3107
3108
/*
 * One-time software initialization of driver state: locks, mutexes,
 * default configuration values, regulatory channel map and mac80211
 * geo/rate tables.  Returns 0 or a negative errno; on failure the
 * channel map allocated here is freed via the error labels.
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);
	mutex_init(&priv->sync_cmd_mutex);

	/* Filled in below by init_channel_map()/init_geos() */
	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
	priv->_4965.agg_tids_count = 0;

	/* initialize force reset */
	priv->force_reset[IWL_RF_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_RF_RESET;
	priv->force_reset[IWL_FW_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_legacy_init_scan_params(priv);

	/* Set the tx_power_user_lmt to the lowest power level
	 * this value will get overwritten by channel max power avg
	 * from eeprom */
	priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
	priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;

	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	/* Populate the mac80211 rate table just allocated by init_geos() */
	iwl4965_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
3169
/*
 * Release driver state allocated by iwl4965_init_drv(), plus the
 * calibration results and the scan command buffer.
 */
static void iwl4965_uninit_drv(struct iwl_priv *priv)
{
	iwl4965_calib_free_results(priv);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
	kfree(priv->scan_cmd); /* kfree(NULL) is a no-op */
}
3177
/* Cache hardware revision info from CSR registers and PCI config space. */
static void iwl4965_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
3185
3186static int iwl4965_set_hw_params(struct iwl_priv *priv)
3187{
3188 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3189 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
3190 if (priv->cfg->mod_params->amsdu_size_8K)
3191 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
3192 else
3193 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
3194
3195 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3196
3197 if (priv->cfg->mod_params->disable_11n)
3198 priv->cfg->sku &= ~IWL_SKU_N;
3199
3200 /* Device-specific setup */
3201 return priv->cfg->ops->lib->set_hw_params(priv);
3202}
3203
/* Map mac80211 access-category index (VO, VI, BE, BK) to uCode TX FIFO */
static const u8 iwl4965_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
3210
/* Map access-category index to driver TX queue (identity for BSS context) */
static const u8 iwl4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
3214
3215static int
3216iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3217{
3218 int err = 0, i;
3219 struct iwl_priv *priv;
3220 struct ieee80211_hw *hw;
3221 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3222 unsigned long flags;
3223 u16 pci_cmd;
3224
3225 /************************
3226 * 1. Allocating HW data
3227 ************************/
3228
3229 hw = iwl_legacy_alloc_all(cfg);
3230 if (!hw) {
3231 err = -ENOMEM;
3232 goto out;
3233 }
3234 priv = hw->priv;
3235 /* At this point both hw and priv are allocated. */
3236
3237 /*
3238 * The default context is always valid,
3239 * more may be discovered when firmware
3240 * is loaded.
3241 */
3242 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3243
3244 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3245 priv->contexts[i].ctxid = i;
3246
3247 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
3248 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
3249 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3250 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3251 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3252 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3253 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3254 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3255 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
3256 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
3257 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
3258 BIT(NL80211_IFTYPE_ADHOC);
3259 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3260 BIT(NL80211_IFTYPE_STATION);
3261 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
3262 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3263 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3264 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3265
3266 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
3267
3268 SET_IEEE80211_DEV(hw, &pdev->dev);
3269
3270 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3271 priv->cfg = cfg;
3272 priv->pci_dev = pdev;
3273 priv->inta_mask = CSR_INI_SET_MASK;
3274
3275 if (iwl_legacy_alloc_traffic_mem(priv))
3276 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3277
3278 /**************************
3279 * 2. Initializing PCI bus
3280 **************************/
3281 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3282 PCIE_LINK_STATE_CLKPM);
3283
3284 if (pci_enable_device(pdev)) {
3285 err = -ENODEV;
3286 goto out_ieee80211_free_hw;
3287 }
3288
3289 pci_set_master(pdev);
3290
3291 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
3292 if (!err)
3293 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
3294 if (err) {
3295 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3296 if (!err)
3297 err = pci_set_consistent_dma_mask(pdev,
3298 DMA_BIT_MASK(32));
3299 /* both attempts failed: */
3300 if (err) {
3301 IWL_WARN(priv, "No suitable DMA available.\n");
3302 goto out_pci_disable_device;
3303 }
3304 }
3305
3306 err = pci_request_regions(pdev, DRV_NAME);
3307 if (err)
3308 goto out_pci_disable_device;
3309
3310 pci_set_drvdata(pdev, priv);
3311
3312
3313 /***********************
3314 * 3. Read REV register
3315 ***********************/
3316 priv->hw_base = pci_iomap(pdev, 0, 0);
3317 if (!priv->hw_base) {
3318 err = -ENODEV;
3319 goto out_pci_release_regions;
3320 }
3321
3322 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3323 (unsigned long long) pci_resource_len(pdev, 0));
3324 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3325
3326 /* these spin locks will be used in apm_ops.init and EEPROM access
3327 * we should init now
3328 */
3329 spin_lock_init(&priv->reg_lock);
3330 spin_lock_init(&priv->lock);
3331
3332 /*
3333 * stop and reset the on-board processor just in case it is in a
3334 * strange state ... like being left stranded by a primary kernel
3335 * and this is now the kdump kernel trying to start up
3336 */
3337 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3338
3339 iwl4965_hw_detect(priv);
3340 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3341 priv->cfg->name, priv->hw_rev);
3342
3343 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3344 * PCI Tx retries from interfering with C3 CPU state */
3345 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3346
3347 iwl4965_prepare_card_hw(priv);
3348 if (!priv->hw_ready) {
3349 IWL_WARN(priv, "Failed, HW not ready\n");
3350 goto out_iounmap;
3351 }
3352
3353 /*****************
3354 * 4. Read EEPROM
3355 *****************/
3356 /* Read the EEPROM */
3357 err = iwl_legacy_eeprom_init(priv);
3358 if (err) {
3359 IWL_ERR(priv, "Unable to init EEPROM\n");
3360 goto out_iounmap;
3361 }
3362 err = iwl4965_eeprom_check_version(priv);
3363 if (err)
3364 goto out_free_eeprom;
3365
3366 if (err)
3367 goto out_free_eeprom;
3368
3369 /* extract MAC Address */
3370 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3371 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3372 priv->hw->wiphy->addresses = priv->addresses;
3373 priv->hw->wiphy->n_addresses = 1;
3374
3375 /************************
3376 * 5. Setup HW constants
3377 ************************/
3378 if (iwl4965_set_hw_params(priv)) {
3379 IWL_ERR(priv, "failed to set hw parameters\n");
3380 goto out_free_eeprom;
3381 }
3382
3383 /*******************
3384 * 6. Setup priv
3385 *******************/
3386
3387 err = iwl4965_init_drv(priv);
3388 if (err)
3389 goto out_free_eeprom;
3390 /* At this point both hw and priv are initialized. */
3391
3392 /********************
3393 * 7. Setup services
3394 ********************/
3395 spin_lock_irqsave(&priv->lock, flags);
3396 iwl_legacy_disable_interrupts(priv);
3397 spin_unlock_irqrestore(&priv->lock, flags);
3398
3399 pci_enable_msi(priv->pci_dev);
3400
3401 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3402 IRQF_SHARED, DRV_NAME, priv);
3403 if (err) {
3404 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3405 goto out_disable_msi;
3406 }
3407
3408 iwl4965_setup_deferred_work(priv);
3409 iwl4965_setup_rx_handlers(priv);
3410
3411 /*********************************************
3412 * 8. Enable interrupts and read RFKILL state
3413 *********************************************/
3414
3415 /* enable interrupts if needed: hw bug w/a */
3416 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3417 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3418 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3419 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3420 }
3421
3422 iwl_legacy_enable_interrupts(priv);
3423
3424 /* If platform's RF_KILL switch is NOT set to KILL */
3425 if (iwl_read32(priv, CSR_GP_CNTRL) &
3426 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3427 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3428 else
3429 set_bit(STATUS_RF_KILL_HW, &priv->status);
3430
3431 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3432 test_bit(STATUS_RF_KILL_HW, &priv->status));
3433
3434 iwl_legacy_power_initialize(priv);
3435
3436 init_completion(&priv->_4965.firmware_loading_complete);
3437
3438 err = iwl4965_request_firmware(priv, true);
3439 if (err)
3440 goto out_destroy_workqueue;
3441
3442 return 0;
3443
3444 out_destroy_workqueue:
3445 destroy_workqueue(priv->workqueue);
3446 priv->workqueue = NULL;
3447 free_irq(priv->pci_dev->irq, priv);
3448 out_disable_msi:
3449 pci_disable_msi(priv->pci_dev);
3450 iwl4965_uninit_drv(priv);
3451 out_free_eeprom:
3452 iwl_legacy_eeprom_free(priv);
3453 out_iounmap:
3454 pci_iounmap(pdev, priv->hw_base);
3455 out_pci_release_regions:
3456 pci_set_drvdata(pdev, NULL);
3457 pci_release_regions(pdev);
3458 out_pci_disable_device:
3459 pci_disable_device(pdev);
3460 out_ieee80211_free_hw:
3461 iwl_legacy_free_traffic_mem(priv);
3462 ieee80211_free_hw(priv->hw);
3463 out:
3464 return err;
3465}
3466
/*
 * Remove one 4965 device: wait for the asynchronous firmware load to
 * finish, unregister from mac80211, quiesce the hardware, then release
 * every resource acquired in iwl4965_pci_probe(), in reverse order.
 */
static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	/* Probe returns before firmware load completes; don't tear down
	 * underneath it */
	wait_for_completion(&priv->_4965.firmware_loading_complete);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);
	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);

	/* The ieee80211_unregister_hw call will cause iwl_mac_stop to
	 * be called and iwl4965_down since we are removing the device
	 * we need to set STATUS_EXIT_PENDING bit.
	 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl4965_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl4965_down(), but there are paths to
	 * run iwl4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_synchronize_irq(priv);

	iwl4965_dealloc_ucode_pci(priv);

	/* RX queue may never have been allocated if probe failed early */
	if (priv->rxq.bd)
		iwl4965_rx_queue_free(priv, &priv->rxq);
	iwl4965_hw_txq_ctx_free(priv);

	iwl_legacy_eeprom_free(priv);


	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	free_irq(priv->pci_dev->irq, priv);
	pci_disable_msi(priv->pci_dev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl4965_uninit_drv(priv);

	/* dev_kfree_skb(NULL) is a no-op */
	dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
3547
3548/*
3549 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
3550 * must be called under priv->lock and mac access
3551 */
3552void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
3553{
3554 iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
3555}
3556
3557/*****************************************************************************
3558 *
3559 * driver and module entry point
3560 *
3561 *****************************************************************************/
3562
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
#endif /* CONFIG_IWL4965 || CONFIG_IWL4965_MODULE */

	{0}	/* zero entry terminates the table */
};
MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
3573
/* PCI driver glue: probe/remove callbacks plus legacy PM ops */
static struct pci_driver iwl4965_driver = {
	.name = DRV_NAME,
	.id_table = iwl4965_hw_card_ids,
	.probe = iwl4965_pci_probe,
	.remove = __devexit_p(iwl4965_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};
3581
3582static int __init iwl4965_init(void)
3583{
3584
3585 int ret;
3586 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3587 pr_info(DRV_COPYRIGHT "\n");
3588
3589 ret = iwl4965_rate_control_register();
3590 if (ret) {
3591 pr_err("Unable to register rate control algorithm: %d\n", ret);
3592 return ret;
3593 }
3594
3595 ret = pci_register_driver(&iwl4965_driver);
3596 if (ret) {
3597 pr_err("Unable to initialize PCI module\n");
3598 goto error_register;
3599 }
3600
3601 return ret;
3602
3603error_register:
3604 iwl4965_rate_control_unregister();
3605 return ret;
3606}
3607
/* Module exit: unregister in reverse order of iwl4965_init() */
static void __exit iwl4965_exit(void)
{
	pci_unregister_driver(&iwl4965_driver);
	iwl4965_rate_control_unregister();
}
3613
module_exit(iwl4965_exit);
module_init(iwl4965_init);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Debug bitmask is writable at runtime via sysfs (S_IWUSR) */
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Remaining module parameters are read-only after load (S_IRUGO) */
module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index e1e3b1cf3cff..17d555f2215a 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,18 +1,52 @@
1config IWLWIFI 1config IWLAGN
2 tristate "Intel Wireless Wifi" 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
3 depends on PCI && MAC80211 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 select NEW_LEDS 5 select NEW_LEDS
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select LEDS_TRIGGERS 7 select LEDS_TRIGGERS
8 select MAC80211_LEDS 8 select MAC80211_LEDS
9 ---help---
10 Select to build the driver supporting the:
11
12 Intel Wireless WiFi Link Next-Gen AGN
13
14 This option enables support for use with the following hardware:
15 Intel Wireless WiFi Link 6250AGN Adapter
16 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
17 Intel WiFi Link 1000BGN
18 Intel Wireless WiFi 5150AGN
19 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
20 Intel 6005 Series Wi-Fi Adapters
21 Intel 6030 Series Wi-Fi Adapters
22 Intel Wireless WiFi Link 6150BGN 2 Adapter
23 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
24 Intel 2000 Series Wi-Fi Adapters
25
26
27 This driver uses the kernel's mac80211 subsystem.
28
29 In order to use this driver, you will need a microcode (uCode)
30 image for it. You can obtain the microcode from:
31
32 <http://intellinuxwireless.org/>.
33
34 The microcode is typically installed in /lib/firmware. You can
35 look in the hotplug script /etc/hotplug/firmware.agent to
36 determine which directory FIRMWARE_DIR is set to when the script
37 runs.
38
39 If you want to compile the driver as a module ( = code which can be
40 inserted in and removed from the running kernel whenever you want),
41 say M here and read <file:Documentation/kbuild/modules.txt>. The
42 module will be called iwlagn.
9 43
10menu "Debugging Options" 44menu "Debugging Options"
11 depends on IWLWIFI 45 depends on IWLAGN
12 46
13config IWLWIFI_DEBUG 47config IWLWIFI_DEBUG
14 bool "Enable full debugging output in iwlagn and iwl3945 drivers" 48 bool "Enable full debugging output in the iwlagn driver"
15 depends on IWLWIFI 49 depends on IWLAGN
16 ---help--- 50 ---help---
17 This option will enable debug tracing output for the iwlwifi drivers 51 This option will enable debug tracing output for the iwlwifi drivers
18 52
@@ -37,7 +71,7 @@ config IWLWIFI_DEBUG
37 71
38config IWLWIFI_DEBUGFS 72config IWLWIFI_DEBUGFS
39 bool "iwlagn debugfs support" 73 bool "iwlagn debugfs support"
40 depends on IWLWIFI && MAC80211_DEBUGFS 74 depends on IWLAGN && MAC80211_DEBUGFS
41 ---help--- 75 ---help---
42 Enable creation of debugfs files for the iwlwifi drivers. This 76 Enable creation of debugfs files for the iwlwifi drivers. This
43 is a low-impact option that allows getting insight into the 77 is a low-impact option that allows getting insight into the
@@ -45,13 +79,13 @@ config IWLWIFI_DEBUGFS
45 79
46config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE 80config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
47 bool "Experimental uCode support" 81 bool "Experimental uCode support"
48 depends on IWLWIFI && IWLWIFI_DEBUG 82 depends on IWLAGN && IWLWIFI_DEBUG
49 ---help--- 83 ---help---
50 Enable use of experimental ucode for testing and debugging. 84 Enable use of experimental ucode for testing and debugging.
51 85
52config IWLWIFI_DEVICE_TRACING 86config IWLWIFI_DEVICE_TRACING
53 bool "iwlwifi device access tracing" 87 bool "iwlwifi device access tracing"
54 depends on IWLWIFI 88 depends on IWLAGN
55 depends on EVENT_TRACING 89 depends on EVENT_TRACING
56 help 90 help
57 Say Y here to trace all commands, including TX frames and IO 91 Say Y here to trace all commands, including TX frames and IO
@@ -68,57 +102,9 @@ config IWLWIFI_DEVICE_TRACING
68 occur. 102 occur.
69endmenu 103endmenu
70 104
71config IWLAGN
72 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
73 depends on IWLWIFI
74 ---help---
75 Select to build the driver supporting the:
76
77 Intel Wireless WiFi Link Next-Gen AGN
78
79 This driver uses the kernel's mac80211 subsystem.
80
81 In order to use this driver, you will need a microcode (uCode)
82 image for it. You can obtain the microcode from:
83
84 <http://intellinuxwireless.org/>.
85
86 The microcode is typically installed in /lib/firmware. You can
87 look in the hotplug script /etc/hotplug/firmware.agent to
88 determine which directory FIRMWARE_DIR is set to when the script
89 runs.
90
91 If you want to compile the driver as a module ( = code which can be
92 inserted in and removed from the running kernel whenever you want),
93 say M here and read <file:Documentation/kbuild/modules.txt>. The
94 module will be called iwlagn.
95
96
97config IWL4965
98 bool "Intel Wireless WiFi 4965AGN"
99 depends on IWLAGN
100 ---help---
101 This option enables support for Intel Wireless WiFi Link 4965AGN
102
103config IWL5000
104 bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
105 depends on IWLAGN
106 ---help---
107 This option enables support for use with the following hardware:
108 Intel Wireless WiFi Link 6250AGN Adapter
109 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
110 Intel WiFi Link 1000BGN
111 Intel Wireless WiFi 5150AGN
112 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
113 Intel 6005 Series Wi-Fi Adapters
114 Intel 6030 Series Wi-Fi Adapters
115 Intel Wireless WiFi Link 6150BGN 2 Adapter
116 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
117 Intel 2000 Series Wi-Fi Adapters
118
119config IWL_P2P 105config IWL_P2P
120 bool "iwlwifi experimental P2P support" 106 bool "iwlwifi experimental P2P support"
121 depends on IWL5000 107 depends on IWLAGN
122 help 108 help
123 This option enables experimental P2P support for some devices 109 This option enables experimental P2P support for some devices
124 based on microcode support. Since P2P support is still under 110 based on microcode support. Since P2P support is still under
@@ -132,27 +118,3 @@ config IWL_P2P
132 118
133 Say Y only if you want to experiment with P2P. 119 Say Y only if you want to experiment with P2P.
134 120
135config IWL3945
136 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
137 depends on IWLWIFI
138 ---help---
139 Select to build the driver supporting the:
140
141 Intel PRO/Wireless 3945ABG/BG Network Connection
142
143 This driver uses the kernel's mac80211 subsystem.
144
145 In order to use this driver, you will need a microcode (uCode)
146 image for it. You can obtain the microcode from:
147
148 <http://intellinuxwireless.org/>.
149
150 The microcode is typically installed in /lib/firmware. You can
151 look in the hotplug script /etc/hotplug/firmware.agent to
152 determine which directory FIRMWARE_DIR is set to when the script
153 runs.
154
155 If you want to compile the driver as a module ( = code which can be
156 inserted in and removed from the running kernel whenever you want),
157 say M here and read <file:Documentation/kbuild/modules.txt>. The
158 module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 25be742c69c9..9d6ee836426c 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,36 +1,23 @@
1obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
6iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
7iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
8iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
9
10# If 3945 is selected only, iwl-legacy.o will be added
11# to iwlcore-m above, but it needs to be built in.
12iwlcore-objs += $(iwlcore-m)
13
14CFLAGS_iwl-devtrace.o := -I$(src)
15
16# AGN 1# AGN
17obj-$(CONFIG_IWLAGN) += iwlagn.o 2obj-$(CONFIG_IWLAGN) += iwlagn.o
18iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o 3iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
19iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o 4iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
20iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 5iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o
21iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o 6iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
22iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
23 7
24iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 8iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
25iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o 9iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
26iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 10iwlagn-objs += iwl-scan.o iwl-led.o
27iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 11iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
28iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 12iwlagn-objs += iwl-5000.o
29iwlagn-$(CONFIG_IWL5000) += iwl-2000.o 13iwlagn-objs += iwl-6000.o
14iwlagn-objs += iwl-1000.o
15iwlagn-objs += iwl-2000.o
16
17iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
18iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
19iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
30 20
31# 3945 21CFLAGS_iwl-devtrace.o := -I$(src)
32obj-$(CONFIG_IWL3945) += iwl3945.o
33iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
34iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
35 22
36ccflags-y += -D__CHECK_ENDIAN__ 23ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index ba78bc8a259f..e8e1c2dc8659 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -232,8 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = {
232 .bt_stats_read = iwl_ucode_bt_stats_read, 232 .bt_stats_read = iwl_ucode_bt_stats_read,
233 .reply_tx_error = iwl_reply_tx_error_read, 233 .reply_tx_error = iwl_reply_tx_error_read,
234 }, 234 },
235 .check_plcp_health = iwl_good_plcp_health,
236 .check_ack_health = iwl_good_ack_health,
237 .txfifo_flush = iwlagn_txfifo_flush, 235 .txfifo_flush = iwlagn_txfifo_flush,
238 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 236 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
239 .tt_ops = { 237 .tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 30483e27ce5c..d7b6126408c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -315,8 +315,6 @@ static struct iwl_lib_ops iwl2000_lib = {
315 .bt_stats_read = iwl_ucode_bt_stats_read, 315 .bt_stats_read = iwl_ucode_bt_stats_read,
316 .reply_tx_error = iwl_reply_tx_error_read, 316 .reply_tx_error = iwl_reply_tx_error_read,
317 }, 317 },
318 .check_plcp_health = iwl_good_plcp_health,
319 .check_ack_health = iwl_good_ack_health,
320 .txfifo_flush = iwlagn_txfifo_flush, 318 .txfifo_flush = iwlagn_txfifo_flush,
321 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 319 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
322 .tt_ops = { 320 .tt_ops = {
@@ -418,6 +416,7 @@ static struct iwl_bt_params iwl2030_bt_params = {
418 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, 416 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
419 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, 417 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
420 .bt_sco_disable = true, 418 .bt_sco_disable = true,
419 .bt_session_2 = true,
421}; 420};
422 421
423#define IWL_DEVICE_2000 \ 422#define IWL_DEVICE_2000 \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6b1386..3ea31b659d1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
51#include "iwl-agn-debugfs.h" 51#include "iwl-agn-debugfs.h"
52 52
53/* Highest firmware API version supported */ 53/* Highest firmware API version supported */
54#define IWL5000_UCODE_API_MAX 2 54#define IWL5000_UCODE_API_MAX 5
55#define IWL5150_UCODE_API_MAX 2 55#define IWL5150_UCODE_API_MAX 2
56 56
57/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
@@ -402,8 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = {
402 .bt_stats_read = iwl_ucode_bt_stats_read, 402 .bt_stats_read = iwl_ucode_bt_stats_read,
403 .reply_tx_error = iwl_reply_tx_error_read, 403 .reply_tx_error = iwl_reply_tx_error_read,
404 }, 404 },
405 .check_plcp_health = iwl_good_plcp_health,
406 .check_ack_health = iwl_good_ack_health,
407 .txfifo_flush = iwlagn_txfifo_flush, 405 .txfifo_flush = iwlagn_txfifo_flush,
408 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 406 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
409 .tt_ops = { 407 .tt_ops = {
@@ -471,8 +469,6 @@ static struct iwl_lib_ops iwl5150_lib = {
471 .bt_stats_read = iwl_ucode_bt_stats_read, 469 .bt_stats_read = iwl_ucode_bt_stats_read,
472 .reply_tx_error = iwl_reply_tx_error_read, 470 .reply_tx_error = iwl_reply_tx_error_read,
473 }, 471 },
474 .check_plcp_health = iwl_good_plcp_health,
475 .check_ack_health = iwl_good_ack_health,
476 .txfifo_flush = iwlagn_txfifo_flush, 472 .txfifo_flush = iwlagn_txfifo_flush,
477 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 473 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
478 .tt_ops = { 474 .tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index f6493f77610d..a745b01c0ec1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -343,8 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = {
343 .bt_stats_read = iwl_ucode_bt_stats_read, 343 .bt_stats_read = iwl_ucode_bt_stats_read,
344 .reply_tx_error = iwl_reply_tx_error_read, 344 .reply_tx_error = iwl_reply_tx_error_read,
345 }, 345 },
346 .check_plcp_health = iwl_good_plcp_health,
347 .check_ack_health = iwl_good_ack_health,
348 .txfifo_flush = iwlagn_txfifo_flush, 346 .txfifo_flush = iwlagn_txfifo_flush,
349 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 347 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
350 .tt_ops = { 348 .tt_ops = {
@@ -415,8 +413,6 @@ static struct iwl_lib_ops iwl6030_lib = {
415 .bt_stats_read = iwl_ucode_bt_stats_read, 413 .bt_stats_read = iwl_ucode_bt_stats_read,
416 .reply_tx_error = iwl_reply_tx_error_read, 414 .reply_tx_error = iwl_reply_tx_error_read,
417 }, 415 },
418 .check_plcp_health = iwl_good_plcp_health,
419 .check_ack_health = iwl_good_ack_health,
420 .txfifo_flush = iwlagn_txfifo_flush, 416 .txfifo_flush = iwlagn_txfifo_flush,
421 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 417 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
422 .tt_ops = { 418 .tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 325ff5c89ee8..fd142bee9189 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -609,6 +609,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
609struct iwl_mod_params iwlagn_mod_params = { 609struct iwl_mod_params iwlagn_mod_params = {
610 .amsdu_size_8K = 1, 610 .amsdu_size_8K = 1,
611 .restart_fw = 1, 611 .restart_fw = 1,
612 .plcp_check = true,
612 /* the rest are 0 by default */ 613 /* the rest are 0 by default */
613}; 614};
614 615
@@ -1173,7 +1174,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
1173 1174
1174 /* TSF isn't reliable. In order to allow smooth user experience, 1175 /* TSF isn't reliable. In order to allow smooth user experience,
1175 * this W/A doesn't propagate it to the mac80211 */ 1176 * this W/A doesn't propagate it to the mac80211 */
1176 /*rx_status.flag |= RX_FLAG_TSFT;*/ 1177 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
1177 1178
1178 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); 1179 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1179 1180
@@ -1804,26 +1805,39 @@ static const __le32 iwlagn_concurrent_lookup[12] = {
1804 1805
1805void iwlagn_send_advance_bt_config(struct iwl_priv *priv) 1806void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1806{ 1807{
1807 struct iwlagn_bt_cmd bt_cmd = { 1808 struct iwl_basic_bt_cmd basic = {
1808 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT, 1809 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
1809 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT, 1810 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
1810 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, 1811 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
1811 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, 1812 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
1812 }; 1813 };
1814 struct iwl6000_bt_cmd bt_cmd_6000;
1815 struct iwl2000_bt_cmd bt_cmd_2000;
1816 int ret;
1813 1817
1814 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != 1818 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1815 sizeof(bt_cmd.bt3_lookup_table)); 1819 sizeof(basic.bt3_lookup_table));
1816 1820
1817 if (priv->cfg->bt_params) 1821 if (priv->cfg->bt_params) {
1818 bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost; 1822 if (priv->cfg->bt_params->bt_session_2) {
1819 else 1823 bt_cmd_2000.prio_boost = cpu_to_le32(
1820 bt_cmd.prio_boost = 0; 1824 priv->cfg->bt_params->bt_prio_boost);
1821 bt_cmd.kill_ack_mask = priv->kill_ack_mask; 1825 bt_cmd_2000.tx_prio_boost = 0;
1822 bt_cmd.kill_cts_mask = priv->kill_cts_mask; 1826 bt_cmd_2000.rx_prio_boost = 0;
1827 } else {
1828 bt_cmd_6000.prio_boost =
1829 priv->cfg->bt_params->bt_prio_boost;
1830 bt_cmd_6000.tx_prio_boost = 0;
1831 bt_cmd_6000.rx_prio_boost = 0;
1832 }
1833 } else {
1834 IWL_ERR(priv, "failed to construct BT Coex Config\n");
1835 return;
1836 }
1823 1837
1824 bt_cmd.valid = priv->bt_valid; 1838 basic.kill_ack_mask = priv->kill_ack_mask;
1825 bt_cmd.tx_prio_boost = 0; 1839 basic.kill_cts_mask = priv->kill_cts_mask;
1826 bt_cmd.rx_prio_boost = 0; 1840 basic.valid = priv->bt_valid;
1827 1841
1828 /* 1842 /*
1829 * Configure BT coex mode to "no coexistence" when the 1843 * Configure BT coex mode to "no coexistence" when the
@@ -1832,32 +1846,43 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1832 * IBSS mode (no proper uCode support for coex then). 1846 * IBSS mode (no proper uCode support for coex then).
1833 */ 1847 */
1834 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) { 1848 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1835 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; 1849 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
1836 } else { 1850 } else {
1837 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << 1851 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1838 IWLAGN_BT_FLAG_COEX_MODE_SHIFT; 1852 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1839 if (priv->cfg->bt_params && 1853 if (priv->cfg->bt_params &&
1840 priv->cfg->bt_params->bt_sco_disable) 1854 priv->cfg->bt_params->bt_sco_disable)
1841 bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; 1855 basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1842 1856
1843 if (priv->bt_ch_announce) 1857 if (priv->bt_ch_announce)
1844 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; 1858 basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1845 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags); 1859 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
1846 } 1860 }
1847 priv->bt_enable_flag = bt_cmd.flags; 1861 priv->bt_enable_flag = basic.flags;
1848 if (priv->bt_full_concurrent) 1862 if (priv->bt_full_concurrent)
1849 memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup, 1863 memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
1850 sizeof(iwlagn_concurrent_lookup)); 1864 sizeof(iwlagn_concurrent_lookup));
1851 else 1865 else
1852 memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup, 1866 memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
1853 sizeof(iwlagn_def_3w_lookup)); 1867 sizeof(iwlagn_def_3w_lookup));
1854 1868
1855 IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n", 1869 IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
1856 bt_cmd.flags ? "active" : "disabled", 1870 basic.flags ? "active" : "disabled",
1857 priv->bt_full_concurrent ? 1871 priv->bt_full_concurrent ?
1858 "full concurrency" : "3-wire"); 1872 "full concurrency" : "3-wire");
1859 1873
1860 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd)) 1874 if (priv->cfg->bt_params->bt_session_2) {
1875 memcpy(&bt_cmd_2000.basic, &basic,
1876 sizeof(basic));
1877 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1878 sizeof(bt_cmd_2000), &bt_cmd_2000);
1879 } else {
1880 memcpy(&bt_cmd_6000.basic, &basic,
1881 sizeof(basic));
1882 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1883 sizeof(bt_cmd_6000), &bt_cmd_6000);
1884 }
1885 if (ret)
1861 IWL_ERR(priv, "failed to send BT Coex Config\n"); 1886 IWL_ERR(priv, "failed to send BT Coex Config\n");
1862 1887
1863} 1888}
@@ -1984,12 +2009,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1984 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> 2009 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
1985 BT_UART_MSG_FRAME6DISCOVERABLE_POS); 2010 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
1986 2011
1987 IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = " 2012 IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
1988 "0x%X, Connectable = 0x%X", 2013 "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
1989 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> 2014 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
1990 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, 2015 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
1991 (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >> 2016 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
1992 BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS, 2017 BT_UART_MSG_FRAME7PAGE_POS,
2018 (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
2019 BT_UART_MSG_FRAME7INQUIRY_POS,
1993 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >> 2020 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
1994 BT_UART_MSG_FRAME7CONNECTABLE_POS); 2021 BT_UART_MSG_FRAME7CONNECTABLE_POS);
1995} 2022}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 6c2adc58d654..dfdbea6e8f99 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -471,6 +471,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
471 struct iwl_rxon_context *tmp; 471 struct iwl_rxon_context *tmp;
472 struct ieee80211_sta *sta; 472 struct ieee80211_sta *sta;
473 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 473 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
474 struct ieee80211_sta_ht_cap *ht_cap;
474 bool need_multiple; 475 bool need_multiple;
475 476
476 lockdep_assert_held(&priv->mutex); 477 lockdep_assert_held(&priv->mutex);
@@ -479,23 +480,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
479 case NL80211_IFTYPE_STATION: 480 case NL80211_IFTYPE_STATION:
480 rcu_read_lock(); 481 rcu_read_lock();
481 sta = ieee80211_find_sta(vif, bss_conf->bssid); 482 sta = ieee80211_find_sta(vif, bss_conf->bssid);
482 if (sta) { 483 if (!sta) {
483 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
484 int maxstreams;
485
486 maxstreams = (ht_cap->mcs.tx_params &
487 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
488 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
489 maxstreams += 1;
490
491 need_multiple = true;
492
493 if ((ht_cap->mcs.rx_mask[1] == 0) &&
494 (ht_cap->mcs.rx_mask[2] == 0))
495 need_multiple = false;
496 if (maxstreams <= 1)
497 need_multiple = false;
498 } else {
499 /* 484 /*
500 * If at all, this can only happen through a race 485 * If at all, this can only happen through a race
501 * when the AP disconnects us while we're still 486 * when the AP disconnects us while we're still
@@ -503,7 +488,46 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
503 * will soon tell us about that. 488 * will soon tell us about that.
504 */ 489 */
505 need_multiple = false; 490 need_multiple = false;
491 rcu_read_unlock();
492 break;
493 }
494
495 ht_cap = &sta->ht_cap;
496
497 need_multiple = true;
498
499 /*
500 * If the peer advertises no support for receiving 2 and 3
501 * stream MCS rates, it can't be transmitting them either.
502 */
503 if (ht_cap->mcs.rx_mask[1] == 0 &&
504 ht_cap->mcs.rx_mask[2] == 0) {
505 need_multiple = false;
506 } else if (!(ht_cap->mcs.tx_params &
507 IEEE80211_HT_MCS_TX_DEFINED)) {
508 /* If it can't TX MCS at all ... */
509 need_multiple = false;
510 } else if (ht_cap->mcs.tx_params &
511 IEEE80211_HT_MCS_TX_RX_DIFF) {
512 int maxstreams;
513
514 /*
515 * But if it can receive them, it might still not
516 * be able to transmit them, which is what we need
517 * to check here -- so check the number of streams
518 * it advertises for TX (if different from RX).
519 */
520
521 maxstreams = (ht_cap->mcs.tx_params &
522 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
523 maxstreams >>=
524 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
525 maxstreams += 1;
526
527 if (maxstreams <= 1)
528 need_multiple = false;
506 } 529 }
530
507 rcu_read_unlock(); 531 rcu_read_unlock();
508 break; 532 break;
509 case NL80211_IFTYPE_ADHOC: 533 case NL80211_IFTYPE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 266490d8a397..a709d05c5868 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -947,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
947 */ 947 */
948void iwlagn_txq_ctx_stop(struct iwl_priv *priv) 948void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
949{ 949{
950 int ch; 950 int ch, txq_id;
951 unsigned long flags; 951 unsigned long flags;
952 952
953 /* Turn off all Tx DMA fifos */ 953 /* Turn off all Tx DMA fifos */
@@ -966,6 +966,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
966 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); 966 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
967 } 967 }
968 spin_unlock_irqrestore(&priv->lock, flags); 968 spin_unlock_irqrestore(&priv->lock, flags);
969
970 if (!priv->txq)
971 return;
972
973 /* Unmap DMA from host system and free skb's */
974 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
975 if (txq_id == priv->cmd_queue)
976 iwl_cmd_queue_unmap(priv);
977 else
978 iwl_tx_queue_unmap(priv, txq_id);
969} 979}
970 980
971/* 981/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index abd0461bd307..f189bbe78fa6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -86,7 +86,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION); 86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
90 89
91static int iwlagn_ant_coupling; 90static int iwlagn_ant_coupling;
92static bool iwlagn_bt_ch_announce = 1; 91static bool iwlagn_bt_ch_announce = 1;
@@ -466,6 +465,15 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
466 IWL_WARN(priv, "%s uCode did not respond OK.\n", 465 IWL_WARN(priv, "%s uCode did not respond OK.\n",
467 (palive->ver_subtype == INITIALIZE_SUBTYPE) ? 466 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
468 "init" : "runtime"); 467 "init" : "runtime");
468 /*
469 * If fail to load init uCode,
470 * let's try to load the init uCode again.
471 * We should not get into this situation, but if it
472 * does happen, we should not move on and loading "runtime"
473 * without proper calibrate the device.
474 */
475 if (palive->ver_subtype == INITIALIZE_SUBTYPE)
476 priv->ucode_type = UCODE_NONE;
469 queue_work(priv->workqueue, &priv->restart); 477 queue_work(priv->workqueue, &priv->restart);
470 } 478 }
471} 479}
@@ -1405,72 +1413,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1405 iwl_enable_rfkill_int(priv); 1413 iwl_enable_rfkill_int(priv);
1406} 1414}
1407 1415
1408/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
1409#define ACK_CNT_RATIO (50)
1410#define BA_TIMEOUT_CNT (5)
1411#define BA_TIMEOUT_MAX (16)
1412
1413/**
1414 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
1415 *
1416 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
1417 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
1418 * operation state.
1419 */
1420bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
1421{
1422 int actual_delta, expected_delta, ba_timeout_delta;
1423 struct statistics_tx *cur, *old;
1424
1425 if (priv->_agn.agg_tids_count)
1426 return true;
1427
1428 if (iwl_bt_statistics(priv)) {
1429 cur = &pkt->u.stats_bt.tx;
1430 old = &priv->_agn.statistics_bt.tx;
1431 } else {
1432 cur = &pkt->u.stats.tx;
1433 old = &priv->_agn.statistics.tx;
1434 }
1435
1436 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
1437 le32_to_cpu(old->actual_ack_cnt);
1438 expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
1439 le32_to_cpu(old->expected_ack_cnt);
1440
1441 /* Values should not be negative, but we do not trust the firmware */
1442 if (actual_delta <= 0 || expected_delta <= 0)
1443 return true;
1444
1445 ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
1446 le32_to_cpu(old->agg.ba_timeout);
1447
1448 if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
1449 ba_timeout_delta > BA_TIMEOUT_CNT) {
1450 IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
1451 actual_delta, expected_delta, ba_timeout_delta);
1452
1453#ifdef CONFIG_IWLWIFI_DEBUGFS
1454 /*
1455 * This is ifdef'ed on DEBUGFS because otherwise the
1456 * statistics aren't available. If DEBUGFS is set but
1457 * DEBUG is not, these will just compile out.
1458 */
1459 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
1460 priv->_agn.delta_statistics.tx.rx_detected_cnt);
1461 IWL_DEBUG_RADIO(priv,
1462 "ack_or_ba_timeout_collision delta %d\n",
1463 priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
1464#endif
1465
1466 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
1467 return false;
1468 }
1469
1470 return true;
1471}
1472
1473
1474/***************************************************************************** 1416/*****************************************************************************
1475 * 1417 *
1476 * sysfs attributes 1418 * sysfs attributes
@@ -2735,9 +2677,11 @@ static void iwl_alive_start(struct iwl_priv *priv)
2735 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 2677 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2736 } 2678 }
2737 2679
2738 if (priv->cfg->bt_params && 2680 if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
2739 !priv->cfg->bt_params->advanced_bt_coexist) { 2681 !priv->cfg->bt_params->advanced_bt_coexist)) {
2740 /* Configure Bluetooth device coexistence support */ 2682 /*
2683 * default is 2-wire BT coexexistence support
2684 */
2741 priv->cfg->ops->hcmd->send_bt_config(priv); 2685 priv->cfg->ops->hcmd->send_bt_config(priv);
2742 } 2686 }
2743 2687
@@ -3320,7 +3264,7 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
3320 IWL_DEBUG_MAC80211(priv, "leave\n"); 3264 IWL_DEBUG_MAC80211(priv, "leave\n");
3321} 3265}
3322 3266
3323int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3267void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3324{ 3268{
3325 struct iwl_priv *priv = hw->priv; 3269 struct iwl_priv *priv = hw->priv;
3326 3270
@@ -3333,7 +3277,6 @@ int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3333 dev_kfree_skb_any(skb); 3277 dev_kfree_skb_any(skb);
3334 3278
3335 IWL_DEBUG_MACDUMP(priv, "leave\n"); 3279 IWL_DEBUG_MACDUMP(priv, "leave\n");
3336 return NETDEV_TX_OK;
3337} 3280}
3338 3281
3339void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 3282void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3799,7 +3742,6 @@ static void iwlagn_bg_roc_done(struct work_struct *work)
3799 mutex_unlock(&priv->mutex); 3742 mutex_unlock(&priv->mutex);
3800} 3743}
3801 3744
3802#ifdef CONFIG_IWL5000
3803static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw, 3745static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3804 struct ieee80211_channel *channel, 3746 struct ieee80211_channel *channel,
3805 enum nl80211_channel_type channel_type, 3747 enum nl80211_channel_type channel_type,
@@ -3855,7 +3797,6 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3855 3797
3856 return 0; 3798 return 0;
3857} 3799}
3858#endif
3859 3800
3860/***************************************************************************** 3801/*****************************************************************************
3861 * 3802 *
@@ -4025,7 +3966,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
4025 kfree(priv->scan_cmd); 3966 kfree(priv->scan_cmd);
4026} 3967}
4027 3968
4028#ifdef CONFIG_IWL5000
4029struct ieee80211_ops iwlagn_hw_ops = { 3969struct ieee80211_ops iwlagn_hw_ops = {
4030 .tx = iwlagn_mac_tx, 3970 .tx = iwlagn_mac_tx,
4031 .start = iwlagn_mac_start, 3971 .start = iwlagn_mac_start,
@@ -4050,13 +3990,12 @@ struct ieee80211_ops iwlagn_hw_ops = {
4050 .remain_on_channel = iwl_mac_remain_on_channel, 3990 .remain_on_channel = iwl_mac_remain_on_channel,
4051 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel, 3991 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
4052}; 3992};
4053#endif
4054 3993
4055static void iwl_hw_detect(struct iwl_priv *priv) 3994static void iwl_hw_detect(struct iwl_priv *priv)
4056{ 3995{
4057 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV); 3996 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
4058 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG); 3997 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
4059 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); 3998 priv->rev_id = priv->pci_dev->revision;
4060 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); 3999 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
4061} 4000}
4062 4001
@@ -4118,12 +4057,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4118 if (cfg->mod_params->disable_hw_scan) { 4057 if (cfg->mod_params->disable_hw_scan) {
4119 dev_printk(KERN_DEBUG, &(pdev->dev), 4058 dev_printk(KERN_DEBUG, &(pdev->dev),
4120 "sw scan support is deprecated\n"); 4059 "sw scan support is deprecated\n");
4121#ifdef CONFIG_IWL5000
4122 iwlagn_hw_ops.hw_scan = NULL; 4060 iwlagn_hw_ops.hw_scan = NULL;
4123#endif
4124#ifdef CONFIG_IWL4965
4125 iwl4965_hw_ops.hw_scan = NULL;
4126#endif
4127 } 4061 }
4128 4062
4129 hw = iwl_alloc_all(cfg); 4063 hw = iwl_alloc_all(cfg);
@@ -4502,12 +4436,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4502 4436
4503/* Hardware specific file defines the PCI IDs table for that hardware module */ 4437/* Hardware specific file defines the PCI IDs table for that hardware module */
4504static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { 4438static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4505#ifdef CONFIG_IWL4965
4506 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
4507 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
4508#endif /* CONFIG_IWL4965 */
4509#ifdef CONFIG_IWL5000
4510/* 5100 Series WiFi */
4511 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ 4439 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
4512 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ 4440 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
4513 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ 4441 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -4693,8 +4621,6 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4693 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)}, 4621 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
4694 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)}, 4622 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
4695 4623
4696#endif /* CONFIG_IWL5000 */
4697
4698 {0} 4624 {0}
4699}; 4625};
4700MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 4626MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
@@ -4793,3 +4719,9 @@ MODULE_PARM_DESC(antenna_coupling,
4793module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO); 4719module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
4794MODULE_PARM_DESC(bt_ch_inhibition, 4720MODULE_PARM_DESC(bt_ch_inhibition,
4795 "Disable BT channel inhibition (default: enable)"); 4721 "Disable BT channel inhibition (default: enable)");
4722
4723module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
4724MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
4725
4726module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
4727MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index d00e1ea50a8d..b5a169be48e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -121,8 +121,6 @@ void iwl_disable_ict(struct iwl_priv *priv);
121int iwl_alloc_isr_ict(struct iwl_priv *priv); 121int iwl_alloc_isr_ict(struct iwl_priv *priv);
122void iwl_free_isr_ict(struct iwl_priv *priv); 122void iwl_free_isr_ict(struct iwl_priv *priv);
123irqreturn_t iwl_isr_ict(int irq, void *data); 123irqreturn_t iwl_isr_ict(int irq, void *data);
124bool iwl_good_ack_health(struct iwl_priv *priv,
125 struct iwl_rx_packet *pkt);
126 124
127/* tx queue */ 125/* tx queue */
128void iwlagn_set_wr_ptrs(struct iwl_priv *priv, 126void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
@@ -248,8 +246,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
248/* rx */ 246/* rx */
249void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 247void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
250 struct iwl_rx_mem_buffer *rxb); 248 struct iwl_rx_mem_buffer *rxb);
251bool iwl_good_plcp_health(struct iwl_priv *priv,
252 struct iwl_rx_packet *pkt);
253void iwl_rx_statistics(struct iwl_priv *priv, 249void iwl_rx_statistics(struct iwl_priv *priv,
254 struct iwl_rx_mem_buffer *rxb); 250 struct iwl_rx_mem_buffer *rxb);
255void iwl_reply_statistics(struct iwl_priv *priv, 251void iwl_reply_statistics(struct iwl_priv *priv,
@@ -356,7 +352,7 @@ iwlagn_remove_notification(struct iwl_priv *priv,
356 struct iwl_notification_wait *wait_entry); 352 struct iwl_notification_wait *wait_entry);
357 353
358/* mac80211 handlers (for 4965) */ 354/* mac80211 handlers (for 4965) */
359int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 355void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
360int iwlagn_mac_start(struct ieee80211_hw *hw); 356int iwlagn_mac_start(struct ieee80211_hw *hw);
361void iwlagn_mac_stop(struct ieee80211_hw *hw); 357void iwlagn_mac_stop(struct ieee80211_hw *hw);
362void iwlagn_configure_filter(struct ieee80211_hw *hw, 358void iwlagn_configure_filter(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 0a1d4aeb36aa..03cfb74da2bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -2477,7 +2477,7 @@ struct iwl_bt_cmd {
2477 IWLAGN_BT_VALID_BT4_TIMES | \ 2477 IWLAGN_BT_VALID_BT4_TIMES | \
2478 IWLAGN_BT_VALID_3W_LUT) 2478 IWLAGN_BT_VALID_3W_LUT)
2479 2479
2480struct iwlagn_bt_cmd { 2480struct iwl_basic_bt_cmd {
2481 u8 flags; 2481 u8 flags;
2482 u8 ledtime; /* unused */ 2482 u8 ledtime; /* unused */
2483 u8 max_kill; 2483 u8 max_kill;
@@ -2490,6 +2490,10 @@ struct iwlagn_bt_cmd {
2490 __le32 bt3_lookup_table[12]; 2490 __le32 bt3_lookup_table[12];
2491 __le16 bt4_decision_time; /* unused */ 2491 __le16 bt4_decision_time; /* unused */
2492 __le16 valid; 2492 __le16 valid;
2493};
2494
2495struct iwl6000_bt_cmd {
2496 struct iwl_basic_bt_cmd basic;
2493 u8 prio_boost; 2497 u8 prio_boost;
2494 /* 2498 /*
2495 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask 2499 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
@@ -2499,6 +2503,18 @@ struct iwlagn_bt_cmd {
2499 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ 2503 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2500}; 2504};
2501 2505
2506struct iwl2000_bt_cmd {
2507 struct iwl_basic_bt_cmd basic;
2508 __le32 prio_boost;
2509 /*
2510 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
2511 * if configure the following patterns
2512 */
2513 u8 reserved;
2514 u8 tx_prio_boost; /* SW boost of WiFi tx priority */
2515 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2516};
2517
2502#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0)) 2518#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
2503 2519
2504struct iwlagn_bt_sco_cmd { 2520struct iwlagn_bt_sco_cmd {
@@ -4150,6 +4166,10 @@ enum iwl_bt_coex_profile_traffic_load {
4150 */ 4166 */
4151}; 4167};
4152 4168
4169#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1
4170#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2
4171
4172/* BT UART message - Share Part (BT -> WiFi) */
4153#define BT_UART_MSG_FRAME1MSGTYPE_POS (0) 4173#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
4154#define BT_UART_MSG_FRAME1MSGTYPE_MSK \ 4174#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
4155 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS) 4175 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
@@ -4234,9 +4254,12 @@ enum iwl_bt_coex_profile_traffic_load {
4234#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0) 4254#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
4235#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \ 4255#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
4236 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS) 4256 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
4237#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3) 4257#define BT_UART_MSG_FRAME7PAGE_POS (3)
4238#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \ 4258#define BT_UART_MSG_FRAME7PAGE_MSK \
4239 (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS) 4259 (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
4260#define BT_UART_MSG_FRAME7INQUIRY_POS (4)
4261#define BT_UART_MSG_FRAME7INQUIRY_MSK \
4262 (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
4240#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5) 4263#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
4241#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \ 4264#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
4242 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS) 4265 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
@@ -4244,6 +4267,83 @@ enum iwl_bt_coex_profile_traffic_load {
4244#define BT_UART_MSG_FRAME7RESERVED_MSK \ 4267#define BT_UART_MSG_FRAME7RESERVED_MSK \
4245 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS) 4268 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
4246 4269
4270/* BT Session Activity 2 UART message (BT -> WiFi) */
4271#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5)
4272#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \
4273 (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
4274#define BT_UART_MSG_2_FRAME1RESERVED2_POS (6)
4275#define BT_UART_MSG_2_FRAME1RESERVED2_MSK \
4276 (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
4277
4278#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
4279#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
4280 (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
4281#define BT_UART_MSG_2_FRAME2RESERVED_POS (6)
4282#define BT_UART_MSG_2_FRAME2RESERVED_MSK \
4283 (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
4284
4285#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS (0)
4286#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK \
4287 (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
4288#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS (4)
4289#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK \
4290 (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
4291#define BT_UART_MSG_2_FRAME3LEMASTER_POS (5)
4292#define BT_UART_MSG_2_FRAME3LEMASTER_MSK \
4293 (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
4294#define BT_UART_MSG_2_FRAME3RESERVED_POS (6)
4295#define BT_UART_MSG_2_FRAME3RESERVED_MSK \
4296 (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
4297
4298#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS (0)
4299#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK \
4300 (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
4301#define BT_UART_MSG_2_FRAME4NUMLECONN_POS (4)
4302#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK \
4303 (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
4304#define BT_UART_MSG_2_FRAME4RESERVED_POS (6)
4305#define BT_UART_MSG_2_FRAME4RESERVED_MSK \
4306 (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
4307
4308#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS (0)
4309#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK \
4310 (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
4311#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
4312#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
4313 (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
4314#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS (5)
4315#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK \
4316 (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
4317#define BT_UART_MSG_2_FRAME5RESERVED_POS (6)
4318#define BT_UART_MSG_2_FRAME5RESERVED_MSK \
4319 (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
4320
4321#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
4322#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
4323 (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
4324#define BT_UART_MSG_2_FRAME6RFU_POS (5)
4325#define BT_UART_MSG_2_FRAME6RFU_MSK \
4326 (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
4327#define BT_UART_MSG_2_FRAME6RESERVED_POS (6)
4328#define BT_UART_MSG_2_FRAME6RESERVED_MSK \
4329 (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
4330
4331#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
4332#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
4333 (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
4334#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS (3)
4335#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK \
4336 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
4337#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS (4)
4338#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK \
4339 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
4340#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
4341#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
4342 (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
4343#define BT_UART_MSG_2_FRAME7RESERVED_POS (6)
4344#define BT_UART_MSG_2_FRAME7RESERVED_MSK \
4345 (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
4346
4247 4347
4248struct iwl_bt_uart_msg { 4348struct iwl_bt_uart_msg {
4249 u8 header; 4349 u8 header;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4ad89389a0a9..4bd342060254 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -43,11 +43,6 @@
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44 44
45 45
46MODULE_DESCRIPTION("iwl core");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/* 46/*
52 * set bt_coex_active to true, uCode will do kill/defer 47 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the 48 * every time the priority line is asserted (BT is sending signals on the
@@ -65,15 +60,12 @@ MODULE_LICENSE("GPL");
65 * default: bt_coex_active = true (BT_COEX_ENABLE) 60 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */ 61 */
67bool bt_coex_active = true; 62bool bt_coex_active = true;
68EXPORT_SYMBOL_GPL(bt_coex_active);
69module_param(bt_coex_active, bool, S_IRUGO); 63module_param(bt_coex_active, bool, S_IRUGO);
70MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 64MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
71 65
72u32 iwl_debug_level; 66u32 iwl_debug_level;
73EXPORT_SYMBOL(iwl_debug_level);
74 67
75const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 68const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
76EXPORT_SYMBOL(iwl_bcast_addr);
77 69
78 70
79/* This function both allocates and initializes hw and priv. */ 71/* This function both allocates and initializes hw and priv. */
@@ -98,7 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
98out: 90out:
99 return hw; 91 return hw;
100} 92}
101EXPORT_SYMBOL(iwl_alloc_all);
102 93
103#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 94#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
104#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 95#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -272,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv)
272 263
273 return 0; 264 return 0;
274} 265}
275EXPORT_SYMBOL(iwlcore_init_geos);
276 266
277/* 267/*
278 * iwlcore_free_geos - undo allocations in iwlcore_init_geos 268 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
@@ -283,7 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
283 kfree(priv->ieee_rates); 273 kfree(priv->ieee_rates);
284 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 274 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
285} 275}
286EXPORT_SYMBOL(iwlcore_free_geos);
287 276
288static bool iwl_is_channel_extension(struct iwl_priv *priv, 277static bool iwl_is_channel_extension(struct iwl_priv *priv,
289 enum ieee80211_band band, 278 enum ieee80211_band band,
@@ -328,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
328 le16_to_cpu(ctx->staging.channel), 317 le16_to_cpu(ctx->staging.channel),
329 ctx->ht.extension_chan_offset); 318 ctx->ht.extension_chan_offset);
330} 319}
331EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
332 320
333static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 321static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
334{ 322{
@@ -429,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
429 return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd, 417 return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
430 sizeof(ctx->timing), &ctx->timing); 418 sizeof(ctx->timing), &ctx->timing);
431} 419}
432EXPORT_SYMBOL(iwl_send_rxon_timing);
433 420
434void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 421void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
435 int hw_decrypt) 422 int hw_decrypt)
@@ -442,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
442 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; 429 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
443 430
444} 431}
445EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
446 432
447/* validate RXON structure is valid */ 433/* validate RXON structure is valid */
448int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 434int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
@@ -515,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
515 } 501 }
516 return 0; 502 return 0;
517} 503}
518EXPORT_SYMBOL(iwl_check_rxon_cmd);
519 504
520/** 505/**
521 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 506 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
@@ -579,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
579 564
580 return 0; 565 return 0;
581} 566}
582EXPORT_SYMBOL(iwl_full_rxon_required);
583 567
584u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, 568u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
585 struct iwl_rxon_context *ctx) 569 struct iwl_rxon_context *ctx)
@@ -593,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
593 else 577 else
594 return IWL_RATE_6M_PLCP; 578 return IWL_RATE_6M_PLCP;
595} 579}
596EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
597 580
598static void _iwl_set_rxon_ht(struct iwl_priv *priv, 581static void _iwl_set_rxon_ht(struct iwl_priv *priv,
599 struct iwl_ht_config *ht_conf, 582 struct iwl_ht_config *ht_conf,
@@ -670,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
670 for_each_context(priv, ctx) 653 for_each_context(priv, ctx)
671 _iwl_set_rxon_ht(priv, ht_conf, ctx); 654 _iwl_set_rxon_ht(priv, ht_conf, ctx);
672} 655}
673EXPORT_SYMBOL(iwl_set_rxon_ht);
674 656
675/* Return valid, unused, channel for a passive scan to reset the RF */ 657/* Return valid, unused, channel for a passive scan to reset the RF */
676u8 iwl_get_single_channel_number(struct iwl_priv *priv, 658u8 iwl_get_single_channel_number(struct iwl_priv *priv,
@@ -711,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
711 693
712 return channel; 694 return channel;
713} 695}
714EXPORT_SYMBOL(iwl_get_single_channel_number);
715 696
716/** 697/**
717 * iwl_set_rxon_channel - Set the band and channel values in staging RXON 698 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
@@ -742,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
742 723
743 return 0; 724 return 0;
744} 725}
745EXPORT_SYMBOL(iwl_set_rxon_channel);
746 726
747void iwl_set_flags_for_band(struct iwl_priv *priv, 727void iwl_set_flags_for_band(struct iwl_priv *priv,
748 struct iwl_rxon_context *ctx, 728 struct iwl_rxon_context *ctx,
@@ -766,7 +746,6 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
766 ctx->staging.flags &= ~RXON_FLG_CCK_MSK; 746 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
767 } 747 }
768} 748}
769EXPORT_SYMBOL(iwl_set_flags_for_band);
770 749
771/* 750/*
772 * initialize rxon structure with default values from eeprom 751 * initialize rxon structure with default values from eeprom
@@ -838,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
838 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; 817 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
839 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; 818 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
840} 819}
841EXPORT_SYMBOL(iwl_connection_init_rx_config);
842 820
843void iwl_set_rate(struct iwl_priv *priv) 821void iwl_set_rate(struct iwl_priv *priv)
844{ 822{
@@ -871,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv)
871 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 849 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
872 } 850 }
873} 851}
874EXPORT_SYMBOL(iwl_set_rate);
875 852
876void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) 853void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
877{ 854{
@@ -891,7 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
891 mutex_unlock(&priv->mutex); 868 mutex_unlock(&priv->mutex);
892 } 869 }
893} 870}
894EXPORT_SYMBOL(iwl_chswitch_done);
895 871
896void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 872void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
897{ 873{
@@ -919,7 +895,6 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
919 } 895 }
920 } 896 }
921} 897}
922EXPORT_SYMBOL(iwl_rx_csa);
923 898
924#ifdef CONFIG_IWLWIFI_DEBUG 899#ifdef CONFIG_IWLWIFI_DEBUG
925void iwl_print_rx_config_cmd(struct iwl_priv *priv, 900void iwl_print_rx_config_cmd(struct iwl_priv *priv,
@@ -941,13 +916,15 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
941 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 916 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
942 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 917 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
943} 918}
944EXPORT_SYMBOL(iwl_print_rx_config_cmd);
945#endif 919#endif
946/** 920/**
947 * iwl_irq_handle_error - called for HW or SW error interrupt from card 921 * iwl_irq_handle_error - called for HW or SW error interrupt from card
948 */ 922 */
949void iwl_irq_handle_error(struct iwl_priv *priv) 923void iwl_irq_handle_error(struct iwl_priv *priv)
950{ 924{
925 unsigned int reload_msec;
926 unsigned long reload_jiffies;
927
951 /* Set the FW error flag -- cleared on iwl_down */ 928 /* Set the FW error flag -- cleared on iwl_down */
952 set_bit(STATUS_FW_ERROR, &priv->status); 929 set_bit(STATUS_FW_ERROR, &priv->status);
953 930
@@ -991,6 +968,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
991 * commands by clearing the INIT status bit */ 968 * commands by clearing the INIT status bit */
992 clear_bit(STATUS_READY, &priv->status); 969 clear_bit(STATUS_READY, &priv->status);
993 970
971 /*
972 * If firmware keep reloading, then it indicate something
973 * serious wrong and firmware having problem to recover
974 * from it. Instead of keep trying which will fill the syslog
975 * and hang the system, let's just stop it
976 */
977 reload_jiffies = jiffies;
978 reload_msec = jiffies_to_msecs((long) reload_jiffies -
979 (long) priv->reload_jiffies);
980 priv->reload_jiffies = reload_jiffies;
981 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
982 priv->reload_count++;
983 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
984 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
985 return;
986 }
987 } else
988 priv->reload_count = 0;
989
994 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { 990 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
995 IWL_DEBUG(priv, IWL_DL_FW_ERRORS, 991 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
996 "Restarting adapter due to uCode error.\n"); 992 "Restarting adapter due to uCode error.\n");
@@ -999,7 +995,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
999 queue_work(priv->workqueue, &priv->restart); 995 queue_work(priv->workqueue, &priv->restart);
1000 } 996 }
1001} 997}
1002EXPORT_SYMBOL(iwl_irq_handle_error);
1003 998
1004static int iwl_apm_stop_master(struct iwl_priv *priv) 999static int iwl_apm_stop_master(struct iwl_priv *priv)
1005{ 1000{
@@ -1036,7 +1031,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
1036 */ 1031 */
1037 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1032 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1038} 1033}
1039EXPORT_SYMBOL(iwl_apm_stop);
1040 1034
1041 1035
1042/* 1036/*
@@ -1151,7 +1145,6 @@ int iwl_apm_init(struct iwl_priv *priv)
1151out: 1145out:
1152 return ret; 1146 return ret;
1153} 1147}
1154EXPORT_SYMBOL(iwl_apm_init);
1155 1148
1156 1149
1157int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 1150int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
@@ -1211,7 +1204,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1211 } 1204 }
1212 return ret; 1205 return ret;
1213} 1206}
1214EXPORT_SYMBOL(iwl_set_tx_power);
1215 1207
1216void iwl_send_bt_config(struct iwl_priv *priv) 1208void iwl_send_bt_config(struct iwl_priv *priv)
1217{ 1209{
@@ -1235,7 +1227,6 @@ void iwl_send_bt_config(struct iwl_priv *priv)
1235 sizeof(struct iwl_bt_cmd), &bt_cmd)) 1227 sizeof(struct iwl_bt_cmd), &bt_cmd))
1236 IWL_ERR(priv, "failed to send BT Coex Config\n"); 1228 IWL_ERR(priv, "failed to send BT Coex Config\n");
1237} 1229}
1238EXPORT_SYMBOL(iwl_send_bt_config);
1239 1230
1240int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) 1231int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1241{ 1232{
@@ -1253,7 +1244,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1253 sizeof(struct iwl_statistics_cmd), 1244 sizeof(struct iwl_statistics_cmd),
1254 &statistics_cmd); 1245 &statistics_cmd);
1255} 1246}
1256EXPORT_SYMBOL(iwl_send_statistics_request);
1257 1247
1258void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, 1248void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1259 struct iwl_rx_mem_buffer *rxb) 1249 struct iwl_rx_mem_buffer *rxb)
@@ -1265,7 +1255,6 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1265 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 1255 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1266#endif 1256#endif
1267} 1257}
1268EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
1269 1258
1270void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 1259void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1271 struct iwl_rx_mem_buffer *rxb) 1260 struct iwl_rx_mem_buffer *rxb)
@@ -1277,7 +1266,6 @@ void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1277 get_cmd_string(pkt->hdr.cmd)); 1266 get_cmd_string(pkt->hdr.cmd));
1278 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); 1267 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1279} 1268}
1280EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
1281 1269
1282void iwl_rx_reply_error(struct iwl_priv *priv, 1270void iwl_rx_reply_error(struct iwl_priv *priv,
1283 struct iwl_rx_mem_buffer *rxb) 1271 struct iwl_rx_mem_buffer *rxb)
@@ -1292,7 +1280,6 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
1292 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), 1280 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1293 le32_to_cpu(pkt->u.err_resp.error_info)); 1281 le32_to_cpu(pkt->u.err_resp.error_info));
1294} 1282}
1295EXPORT_SYMBOL(iwl_rx_reply_error);
1296 1283
1297void iwl_clear_isr_stats(struct iwl_priv *priv) 1284void iwl_clear_isr_stats(struct iwl_priv *priv)
1298{ 1285{
@@ -1344,7 +1331,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1344 IWL_DEBUG_MAC80211(priv, "leave\n"); 1331 IWL_DEBUG_MAC80211(priv, "leave\n");
1345 return 0; 1332 return 0;
1346} 1333}
1347EXPORT_SYMBOL(iwl_mac_conf_tx);
1348 1334
1349int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw) 1335int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1350{ 1336{
@@ -1352,7 +1338,6 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1352 1338
1353 return priv->ibss_manager == IWL_IBSS_MANAGER; 1339 return priv->ibss_manager == IWL_IBSS_MANAGER;
1354} 1340}
1355EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
1356 1341
1357static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1342static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1358{ 1343{
@@ -1462,7 +1447,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1462 IWL_DEBUG_MAC80211(priv, "leave\n"); 1447 IWL_DEBUG_MAC80211(priv, "leave\n");
1463 return err; 1448 return err;
1464} 1449}
1465EXPORT_SYMBOL(iwl_mac_add_interface);
1466 1450
1467static void iwl_teardown_interface(struct iwl_priv *priv, 1451static void iwl_teardown_interface(struct iwl_priv *priv,
1468 struct ieee80211_vif *vif, 1452 struct ieee80211_vif *vif,
@@ -1515,7 +1499,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1515 IWL_DEBUG_MAC80211(priv, "leave\n"); 1499 IWL_DEBUG_MAC80211(priv, "leave\n");
1516 1500
1517} 1501}
1518EXPORT_SYMBOL(iwl_mac_remove_interface);
1519 1502
1520int iwl_alloc_txq_mem(struct iwl_priv *priv) 1503int iwl_alloc_txq_mem(struct iwl_priv *priv)
1521{ 1504{
@@ -1530,14 +1513,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
1530 } 1513 }
1531 return 0; 1514 return 0;
1532} 1515}
1533EXPORT_SYMBOL(iwl_alloc_txq_mem);
1534 1516
1535void iwl_free_txq_mem(struct iwl_priv *priv) 1517void iwl_free_txq_mem(struct iwl_priv *priv)
1536{ 1518{
1537 kfree(priv->txq); 1519 kfree(priv->txq);
1538 priv->txq = NULL; 1520 priv->txq = NULL;
1539} 1521}
1540EXPORT_SYMBOL(iwl_free_txq_mem);
1541 1522
1542#ifdef CONFIG_IWLWIFI_DEBUGFS 1523#ifdef CONFIG_IWLWIFI_DEBUGFS
1543 1524
@@ -1576,7 +1557,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1576 iwl_reset_traffic_log(priv); 1557 iwl_reset_traffic_log(priv);
1577 return 0; 1558 return 0;
1578} 1559}
1579EXPORT_SYMBOL(iwl_alloc_traffic_mem);
1580 1560
1581void iwl_free_traffic_mem(struct iwl_priv *priv) 1561void iwl_free_traffic_mem(struct iwl_priv *priv)
1582{ 1562{
@@ -1586,7 +1566,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv)
1586 kfree(priv->rx_traffic); 1566 kfree(priv->rx_traffic);
1587 priv->rx_traffic = NULL; 1567 priv->rx_traffic = NULL;
1588} 1568}
1589EXPORT_SYMBOL(iwl_free_traffic_mem);
1590 1569
1591void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, 1570void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1592 u16 length, struct ieee80211_hdr *header) 1571 u16 length, struct ieee80211_hdr *header)
@@ -1611,7 +1590,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1611 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; 1590 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1612 } 1591 }
1613} 1592}
1614EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
1615 1593
1616void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, 1594void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1617 u16 length, struct ieee80211_hdr *header) 1595 u16 length, struct ieee80211_hdr *header)
@@ -1636,7 +1614,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1636 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; 1614 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1637 } 1615 }
1638} 1616}
1639EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
1640 1617
1641const char *get_mgmt_string(int cmd) 1618const char *get_mgmt_string(int cmd)
1642{ 1619{
@@ -1773,7 +1750,6 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1773 stats->data_bytes += len; 1750 stats->data_bytes += len;
1774 } 1751 }
1775} 1752}
1776EXPORT_SYMBOL(iwl_update_stats);
1777#endif 1753#endif
1778 1754
1779static void iwl_force_rf_reset(struct iwl_priv *priv) 1755static void iwl_force_rf_reset(struct iwl_priv *priv)
@@ -1912,7 +1888,6 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1912 mutex_unlock(&priv->mutex); 1888 mutex_unlock(&priv->mutex);
1913 return err; 1889 return err;
1914} 1890}
1915EXPORT_SYMBOL(iwl_mac_change_interface);
1916 1891
1917/* 1892/*
1918 * On every watchdog tick we check (latest) time stamp. If it does not 1893 * On every watchdog tick we check (latest) time stamp. If it does not
@@ -1984,7 +1959,6 @@ void iwl_bg_watchdog(unsigned long data)
1984 mod_timer(&priv->watchdog, jiffies + 1959 mod_timer(&priv->watchdog, jiffies +
1985 msecs_to_jiffies(IWL_WD_TICK(timeout))); 1960 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1986} 1961}
1987EXPORT_SYMBOL(iwl_bg_watchdog);
1988 1962
1989void iwl_setup_watchdog(struct iwl_priv *priv) 1963void iwl_setup_watchdog(struct iwl_priv *priv)
1990{ 1964{
@@ -1996,7 +1970,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
1996 else 1970 else
1997 del_timer(&priv->watchdog); 1971 del_timer(&priv->watchdog);
1998} 1972}
1999EXPORT_SYMBOL(iwl_setup_watchdog);
2000 1973
2001/* 1974/*
2002 * extended beacon time format 1975 * extended beacon time format
@@ -2022,7 +1995,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
2022 1995
2023 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; 1996 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
2024} 1997}
2025EXPORT_SYMBOL(iwl_usecs_to_beacons);
2026 1998
2027/* base is usually what we get from ucode with each received frame, 1999/* base is usually what we get from ucode with each received frame,
2028 * the same as HW timer counter counting down 2000 * the same as HW timer counter counting down
@@ -2050,7 +2022,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
2050 2022
2051 return cpu_to_le32(res); 2023 return cpu_to_le32(res);
2052} 2024}
2053EXPORT_SYMBOL(iwl_add_beacon_time);
2054 2025
2055#ifdef CONFIG_PM 2026#ifdef CONFIG_PM
2056 2027
@@ -2070,7 +2041,6 @@ int iwl_pci_suspend(struct device *device)
2070 2041
2071 return 0; 2042 return 0;
2072} 2043}
2073EXPORT_SYMBOL(iwl_pci_suspend);
2074 2044
2075int iwl_pci_resume(struct device *device) 2045int iwl_pci_resume(struct device *device)
2076{ 2046{
@@ -2099,7 +2069,6 @@ int iwl_pci_resume(struct device *device)
2099 2069
2100 return 0; 2070 return 0;
2101} 2071}
2102EXPORT_SYMBOL(iwl_pci_resume);
2103 2072
2104const struct dev_pm_ops iwl_pm_ops = { 2073const struct dev_pm_ops iwl_pm_ops = {
2105 .suspend = iwl_pci_suspend, 2074 .suspend = iwl_pci_suspend,
@@ -2109,6 +2078,5 @@ const struct dev_pm_ops iwl_pm_ops = {
2109 .poweroff = iwl_pci_suspend, 2078 .poweroff = iwl_pci_suspend,
2110 .restore = iwl_pci_resume, 2079 .restore = iwl_pci_resume,
2111}; 2080};
2112EXPORT_SYMBOL(iwl_pm_ops);
2113 2081
2114#endif /* CONFIG_PM */ 2082#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index e0ec17079dc0..d47f3a87fce4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -210,12 +210,7 @@ struct iwl_lib_ops {
210 210
211 /* temperature */ 211 /* temperature */
212 struct iwl_temp_ops temp_ops; 212 struct iwl_temp_ops temp_ops;
213 /* check for plcp health */ 213
214 bool (*check_plcp_health)(struct iwl_priv *priv,
215 struct iwl_rx_packet *pkt);
216 /* check for ack health */
217 bool (*check_ack_health)(struct iwl_priv *priv,
218 struct iwl_rx_packet *pkt);
219 int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control); 214 int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
220 void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control); 215 void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
221 216
@@ -261,6 +256,8 @@ struct iwl_mod_params {
261 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 256 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
262 int antenna; /* def: 0 = both antennas (use diversity) */ 257 int antenna; /* def: 0 = both antennas (use diversity) */
263 int restart_fw; /* def: 1 = restart firmware */ 258 int restart_fw; /* def: 1 = restart firmware */
259 bool plcp_check; /* def: true = enable plcp health check */
260 bool ack_check; /* def: false = disable ack health check */
264}; 261};
265 262
266/* 263/*
@@ -339,6 +336,7 @@ struct iwl_bt_params {
339 u8 ampdu_factor; 336 u8 ampdu_factor;
340 u8 ampdu_density; 337 u8 ampdu_density;
341 bool bt_sco_disable; 338 bool bt_sco_disable;
339 bool bt_session_2;
342}; 340};
343/* 341/*
344 * @use_rts_for_aggregation: use rts/cts protection for HT traffic 342 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -509,6 +507,7 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
509* RX 507* RX
510******************************************************/ 508******************************************************/
511void iwl_cmd_queue_free(struct iwl_priv *priv); 509void iwl_cmd_queue_free(struct iwl_priv *priv);
510void iwl_cmd_queue_unmap(struct iwl_priv *priv);
512int iwl_rx_queue_alloc(struct iwl_priv *priv); 511int iwl_rx_queue_alloc(struct iwl_priv *priv);
513void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 512void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
514 struct iwl_rx_queue *q); 513 struct iwl_rx_queue *q);
@@ -517,8 +516,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
517/* Handlers */ 516/* Handlers */
518void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 517void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
519 struct iwl_rx_mem_buffer *rxb); 518 struct iwl_rx_mem_buffer *rxb);
520void iwl_recover_from_statistics(struct iwl_priv *priv,
521 struct iwl_rx_packet *pkt);
522void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); 519void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
523void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 520void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
524 521
@@ -533,6 +530,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
533void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 530void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
534 int slots_num, u32 txq_id); 531 int slots_num, u32 txq_id);
535void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 532void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
533void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
536void iwl_setup_watchdog(struct iwl_priv *priv); 534void iwl_setup_watchdog(struct iwl_priv *priv);
537/***************************************************** 535/*****************************************************
538 * TX power 536 * TX power
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index bc7a965c18f9..8842411f1cf3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -1788,7 +1788,6 @@ err:
1788 iwl_dbgfs_unregister(priv); 1788 iwl_dbgfs_unregister(priv);
1789 return -ENOMEM; 1789 return -ENOMEM;
1790} 1790}
1791EXPORT_SYMBOL(iwl_dbgfs_register);
1792 1791
1793/** 1792/**
1794 * Remove the debugfs files and directories 1793 * Remove the debugfs files and directories
@@ -1802,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1802 debugfs_remove_recursive(priv->debugfs_dir); 1801 debugfs_remove_recursive(priv->debugfs_dir);
1803 priv->debugfs_dir = NULL; 1802 priv->debugfs_dir = NULL;
1804} 1803}
1805EXPORT_SYMBOL(iwl_dbgfs_unregister);
1806 1804
1807 1805
1808 1806
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index ecfbef402781..58165c769cf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,14 +43,14 @@
43#include "iwl-prph.h" 43#include "iwl-prph.h"
44#include "iwl-fh.h" 44#include "iwl-fh.h"
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-4965-hw.h"
47#include "iwl-3945-hw.h"
48#include "iwl-agn-hw.h" 46#include "iwl-agn-hw.h"
49#include "iwl-led.h" 47#include "iwl-led.h"
50#include "iwl-power.h" 48#include "iwl-power.h"
51#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
52#include "iwl-agn-tt.h" 50#include "iwl-agn-tt.h"
53 51
52#define U32_PAD(n) ((4-(n))&0x3)
53
54struct iwl_tx_queue; 54struct iwl_tx_queue;
55 55
56/* CT-KILL constants */ 56/* CT-KILL constants */
@@ -1110,6 +1110,11 @@ struct iwl_event_log {
1110/* BT Antenna Coupling Threshold (dB) */ 1110/* BT Antenna Coupling Threshold (dB) */
1111#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) 1111#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
1112 1112
1113/* Firmware reload counter and Timestamp */
1114#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */
1115#define IWL_MAX_CONTINUE_RELOAD_CNT 4
1116
1117
1113enum iwl_reset { 1118enum iwl_reset {
1114 IWL_RF_RESET = 0, 1119 IWL_RF_RESET = 0,
1115 IWL_FW_RESET, 1120 IWL_FW_RESET,
@@ -1262,6 +1267,10 @@ struct iwl_priv {
1262 /* force reset */ 1267 /* force reset */
1263 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; 1268 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1264 1269
1270 /* firmware reload counter and timestamp */
1271 unsigned long reload_jiffies;
1272 int reload_count;
1273
1265 /* we allocate array of iwl_channel_info for NIC's valid channels. 1274 /* we allocate array of iwl_channel_info for NIC's valid channels.
1266 * Access via channel # using indirect index array */ 1275 * Access via channel # using indirect index array */
1267 struct iwl_channel_info *channel_info; /* channel info array */ 1276 struct iwl_channel_info *channel_info; /* channel info array */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 358cfd7e5af1..833194a2c639 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size); 222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
223 return &priv->eeprom[offset]; 223 return &priv->eeprom[offset];
224} 224}
225EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
226 225
227static int iwl_init_otp_access(struct iwl_priv *priv) 226static int iwl_init_otp_access(struct iwl_priv *priv)
228{ 227{
@@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
382{ 381{
383 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset); 382 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
384} 383}
385EXPORT_SYMBOL(iwl_eeprom_query_addr);
386 384
387u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) 385u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
388{ 386{
@@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
390 return 0; 388 return 0;
391 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); 389 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
392} 390}
393EXPORT_SYMBOL(iwl_eeprom_query16);
394 391
395/** 392/**
396 * iwl_eeprom_init - read EEPROM contents 393 * iwl_eeprom_init - read EEPROM contents
@@ -509,14 +506,12 @@ err:
509alloc_err: 506alloc_err:
510 return ret; 507 return ret;
511} 508}
512EXPORT_SYMBOL(iwl_eeprom_init);
513 509
514void iwl_eeprom_free(struct iwl_priv *priv) 510void iwl_eeprom_free(struct iwl_priv *priv)
515{ 511{
516 kfree(priv->eeprom); 512 kfree(priv->eeprom);
517 priv->eeprom = NULL; 513 priv->eeprom = NULL;
518} 514}
519EXPORT_SYMBOL(iwl_eeprom_free);
520 515
521static void iwl_init_band_reference(const struct iwl_priv *priv, 516static void iwl_init_band_reference(const struct iwl_priv *priv,
522 int eep_band, int *eeprom_ch_count, 517 int eep_band, int *eeprom_ch_count,
@@ -779,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
779 774
780 return 0; 775 return 0;
781} 776}
782EXPORT_SYMBOL(iwl_init_channel_map);
783 777
784/* 778/*
785 * iwl_free_channel_map - undo allocations in iwl_init_channel_map 779 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
@@ -789,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv)
789 kfree(priv->channel_info); 783 kfree(priv->channel_info);
790 priv->channel_count = 0; 784 priv->channel_count = 0;
791} 785}
792EXPORT_SYMBOL(iwl_free_channel_map);
793 786
794/** 787/**
795 * iwl_get_channel_info - Find driver's private channel info 788 * iwl_get_channel_info - Find driver's private channel info
@@ -818,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
818 811
819 return NULL; 812 return NULL;
820} 813}
821EXPORT_SYMBOL(iwl_get_channel_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index e4b953d7b7bf..02499f684683 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -114,7 +114,6 @@ const char *get_cmd_string(u8 cmd)
114 114
115 } 115 }
116} 116}
117EXPORT_SYMBOL(get_cmd_string);
118 117
119#define HOST_COMPLETE_TIMEOUT (HZ / 2) 118#define HOST_COMPLETE_TIMEOUT (HZ / 2)
120 119
@@ -253,7 +252,6 @@ out:
253 mutex_unlock(&priv->sync_cmd_mutex); 252 mutex_unlock(&priv->sync_cmd_mutex);
254 return ret; 253 return ret;
255} 254}
256EXPORT_SYMBOL(iwl_send_cmd_sync);
257 255
258int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 256int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
259{ 257{
@@ -262,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
262 260
263 return iwl_send_cmd_sync(priv, cmd); 261 return iwl_send_cmd_sync(priv, cmd);
264} 262}
265EXPORT_SYMBOL(iwl_send_cmd);
266 263
267int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) 264int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
268{ 265{
@@ -274,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
274 271
275 return iwl_send_cmd_sync(priv, &cmd); 272 return iwl_send_cmd_sync(priv, &cmd);
276} 273}
277EXPORT_SYMBOL(iwl_send_cmd_pdu);
278 274
279int iwl_send_cmd_pdu_async(struct iwl_priv *priv, 275int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
280 u8 id, u16 len, const void *data, 276 u8 id, u16 len, const void *data,
@@ -293,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
293 289
294 return iwl_send_cmd_async(priv, &cmd); 290 return iwl_send_cmd_async(priv, &cmd);
295} 291}
296EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 074ad2275228..d7f2a0bb32c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -175,7 +175,6 @@ void iwl_leds_init(struct iwl_priv *priv)
175 175
176 priv->led_registered = true; 176 priv->led_registered = true;
177} 177}
178EXPORT_SYMBOL(iwl_leds_init);
179 178
180void iwl_leds_exit(struct iwl_priv *priv) 179void iwl_leds_exit(struct iwl_priv *priv)
181{ 180{
@@ -185,4 +184,3 @@ void iwl_leds_exit(struct iwl_priv *priv)
185 led_classdev_unregister(&priv->led); 184 led_classdev_unregister(&priv->led);
186 kfree(priv->led.name); 185 kfree(priv->led.name);
187} 186}
188EXPORT_SYMBOL(iwl_leds_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
deleted file mode 100644
index e1ace3ce30b3..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ /dev/null
@@ -1,657 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-helpers.h"
35#include "iwl-legacy.h"
36
37static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
38{
39 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
40 return;
41
42 if (!ctx->is_active)
43 return;
44
45 ctx->qos_data.def_qos_parm.qos_flags = 0;
46
47 if (ctx->qos_data.qos_active)
48 ctx->qos_data.def_qos_parm.qos_flags |=
49 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
50
51 if (ctx->ht.enabled)
52 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
53
54 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
55 ctx->qos_data.qos_active,
56 ctx->qos_data.def_qos_parm.qos_flags);
57
58 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
59 sizeof(struct iwl_qosparam_cmd),
60 &ctx->qos_data.def_qos_parm, NULL);
61}
62
63/**
64 * iwl_legacy_mac_config - mac80211 config callback
65 */
66int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
67{
68 struct iwl_priv *priv = hw->priv;
69 const struct iwl_channel_info *ch_info;
70 struct ieee80211_conf *conf = &hw->conf;
71 struct ieee80211_channel *channel = conf->channel;
72 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
73 struct iwl_rxon_context *ctx;
74 unsigned long flags = 0;
75 int ret = 0;
76 u16 ch;
77 int scan_active = 0;
78 bool ht_changed[NUM_IWL_RXON_CTX] = {};
79
80 if (WARN_ON(!priv->cfg->ops->legacy))
81 return -EOPNOTSUPP;
82
83 mutex_lock(&priv->mutex);
84
85 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
86 channel->hw_value, changed);
87
88 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
89 scan_active = 1;
90 IWL_DEBUG_MAC80211(priv, "scan active\n");
91 }
92
93 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
94 IEEE80211_CONF_CHANGE_CHANNEL)) {
95 /* mac80211 uses static for non-HT which is what we want */
96 priv->current_ht_config.smps = conf->smps_mode;
97
98 /*
99 * Recalculate chain counts.
100 *
101 * If monitor mode is enabled then mac80211 will
102 * set up the SM PS mode to OFF if an HT channel is
103 * configured.
104 */
105 if (priv->cfg->ops->hcmd->set_rxon_chain)
106 for_each_context(priv, ctx)
107 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
108 }
109
110 /* during scanning mac80211 will delay channel setting until
111 * scan finish with changed = 0
112 */
113 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
114 if (scan_active)
115 goto set_ch_out;
116
117 ch = channel->hw_value;
118 ch_info = iwl_get_channel_info(priv, channel->band, ch);
119 if (!is_channel_valid(ch_info)) {
120 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
121 ret = -EINVAL;
122 goto set_ch_out;
123 }
124
125 spin_lock_irqsave(&priv->lock, flags);
126
127 for_each_context(priv, ctx) {
128 /* Configure HT40 channels */
129 if (ctx->ht.enabled != conf_is_ht(conf)) {
130 ctx->ht.enabled = conf_is_ht(conf);
131 ht_changed[ctx->ctxid] = true;
132 }
133 if (ctx->ht.enabled) {
134 if (conf_is_ht40_minus(conf)) {
135 ctx->ht.extension_chan_offset =
136 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
137 ctx->ht.is_40mhz = true;
138 } else if (conf_is_ht40_plus(conf)) {
139 ctx->ht.extension_chan_offset =
140 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
141 ctx->ht.is_40mhz = true;
142 } else {
143 ctx->ht.extension_chan_offset =
144 IEEE80211_HT_PARAM_CHA_SEC_NONE;
145 ctx->ht.is_40mhz = false;
146 }
147 } else
148 ctx->ht.is_40mhz = false;
149
150 /*
151 * Default to no protection. Protection mode will
152 * later be set from BSS config in iwl_ht_conf
153 */
154 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
155
156 /* if we are switching from ht to 2.4 clear flags
157 * from any ht related info since 2.4 does not
158 * support ht */
159 if ((le16_to_cpu(ctx->staging.channel) != ch))
160 ctx->staging.flags = 0;
161
162 iwl_set_rxon_channel(priv, channel, ctx);
163 iwl_set_rxon_ht(priv, ht_conf);
164
165 iwl_set_flags_for_band(priv, ctx, channel->band,
166 ctx->vif);
167 }
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 if (priv->cfg->ops->legacy->update_bcast_stations)
172 ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
173
174 set_ch_out:
175 /* The list of supported rates and rate mask can be different
176 * for each band; since the band may have changed, reset
177 * the rate mask to what mac80211 lists */
178 iwl_set_rate(priv);
179 }
180
181 if (changed & (IEEE80211_CONF_CHANGE_PS |
182 IEEE80211_CONF_CHANGE_IDLE)) {
183 ret = iwl_power_update_mode(priv, false);
184 if (ret)
185 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
186 }
187
188 if (changed & IEEE80211_CONF_CHANGE_POWER) {
189 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
190 priv->tx_power_user_lmt, conf->power_level);
191
192 iwl_set_tx_power(priv, conf->power_level, false);
193 }
194
195 if (!iwl_is_ready(priv)) {
196 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
197 goto out;
198 }
199
200 if (scan_active)
201 goto out;
202
203 for_each_context(priv, ctx) {
204 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
205 iwlcore_commit_rxon(priv, ctx);
206 else
207 IWL_DEBUG_INFO(priv,
208 "Not re-sending same RXON configuration.\n");
209 if (ht_changed[ctx->ctxid])
210 iwl_update_qos(priv, ctx);
211 }
212
213out:
214 IWL_DEBUG_MAC80211(priv, "leave\n");
215 mutex_unlock(&priv->mutex);
216 return ret;
217}
218EXPORT_SYMBOL(iwl_legacy_mac_config);
219
220void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
221{
222 struct iwl_priv *priv = hw->priv;
223 unsigned long flags;
224 /* IBSS can only be the IWL_RXON_CTX_BSS context */
225 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
226
227 if (WARN_ON(!priv->cfg->ops->legacy))
228 return;
229
230 mutex_lock(&priv->mutex);
231 IWL_DEBUG_MAC80211(priv, "enter\n");
232
233 spin_lock_irqsave(&priv->lock, flags);
234 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
235 spin_unlock_irqrestore(&priv->lock, flags);
236
237 spin_lock_irqsave(&priv->lock, flags);
238
239 /* new association get rid of ibss beacon skb */
240 if (priv->beacon_skb)
241 dev_kfree_skb(priv->beacon_skb);
242
243 priv->beacon_skb = NULL;
244
245 priv->timestamp = 0;
246
247 spin_unlock_irqrestore(&priv->lock, flags);
248
249 iwl_scan_cancel_timeout(priv, 100);
250 if (!iwl_is_ready_rf(priv)) {
251 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
252 mutex_unlock(&priv->mutex);
253 return;
254 }
255
256 /* we are restarting association process
257 * clear RXON_FILTER_ASSOC_MSK bit
258 */
259 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
260 iwlcore_commit_rxon(priv, ctx);
261
262 iwl_set_rate(priv);
263
264 mutex_unlock(&priv->mutex);
265
266 IWL_DEBUG_MAC80211(priv, "leave\n");
267}
268EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
269
270static void iwl_ht_conf(struct iwl_priv *priv,
271 struct ieee80211_vif *vif)
272{
273 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
274 struct ieee80211_sta *sta;
275 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
276 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
277
278 IWL_DEBUG_ASSOC(priv, "enter:\n");
279
280 if (!ctx->ht.enabled)
281 return;
282
283 ctx->ht.protection =
284 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
285 ctx->ht.non_gf_sta_present =
286 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
287
288 ht_conf->single_chain_sufficient = false;
289
290 switch (vif->type) {
291 case NL80211_IFTYPE_STATION:
292 rcu_read_lock();
293 sta = ieee80211_find_sta(vif, bss_conf->bssid);
294 if (sta) {
295 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
296 int maxstreams;
297
298 maxstreams = (ht_cap->mcs.tx_params &
299 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
300 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
301 maxstreams += 1;
302
303 if ((ht_cap->mcs.rx_mask[1] == 0) &&
304 (ht_cap->mcs.rx_mask[2] == 0))
305 ht_conf->single_chain_sufficient = true;
306 if (maxstreams <= 1)
307 ht_conf->single_chain_sufficient = true;
308 } else {
309 /*
310 * If at all, this can only happen through a race
311 * when the AP disconnects us while we're still
312 * setting up the connection, in that case mac80211
313 * will soon tell us about that.
314 */
315 ht_conf->single_chain_sufficient = true;
316 }
317 rcu_read_unlock();
318 break;
319 case NL80211_IFTYPE_ADHOC:
320 ht_conf->single_chain_sufficient = true;
321 break;
322 default:
323 break;
324 }
325
326 IWL_DEBUG_ASSOC(priv, "leave\n");
327}
328
329static inline void iwl_set_no_assoc(struct iwl_priv *priv,
330 struct ieee80211_vif *vif)
331{
332 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
333
334 /*
335 * inform the ucode that there is no longer an
336 * association and that no more packets should be
337 * sent
338 */
339 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
340 ctx->staging.assoc_id = 0;
341 iwlcore_commit_rxon(priv, ctx);
342}
343
344static void iwlcore_beacon_update(struct ieee80211_hw *hw,
345 struct ieee80211_vif *vif)
346{
347 struct iwl_priv *priv = hw->priv;
348 unsigned long flags;
349 __le64 timestamp;
350 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
351
352 if (!skb)
353 return;
354
355 IWL_DEBUG_MAC80211(priv, "enter\n");
356
357 lockdep_assert_held(&priv->mutex);
358
359 if (!priv->beacon_ctx) {
360 IWL_ERR(priv, "update beacon but no beacon context!\n");
361 dev_kfree_skb(skb);
362 return;
363 }
364
365 spin_lock_irqsave(&priv->lock, flags);
366
367 if (priv->beacon_skb)
368 dev_kfree_skb(priv->beacon_skb);
369
370 priv->beacon_skb = skb;
371
372 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
373 priv->timestamp = le64_to_cpu(timestamp);
374
375 IWL_DEBUG_MAC80211(priv, "leave\n");
376 spin_unlock_irqrestore(&priv->lock, flags);
377
378 if (!iwl_is_ready_rf(priv)) {
379 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
380 return;
381 }
382
383 priv->cfg->ops->legacy->post_associate(priv);
384}
385
386void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
387 struct ieee80211_vif *vif,
388 struct ieee80211_bss_conf *bss_conf,
389 u32 changes)
390{
391 struct iwl_priv *priv = hw->priv;
392 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
393 int ret;
394
395 if (WARN_ON(!priv->cfg->ops->legacy))
396 return;
397
398 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
399
400 if (!iwl_is_alive(priv))
401 return;
402
403 mutex_lock(&priv->mutex);
404
405 if (changes & BSS_CHANGED_QOS) {
406 unsigned long flags;
407
408 spin_lock_irqsave(&priv->lock, flags);
409 ctx->qos_data.qos_active = bss_conf->qos;
410 iwl_update_qos(priv, ctx);
411 spin_unlock_irqrestore(&priv->lock, flags);
412 }
413
414 if (changes & BSS_CHANGED_BEACON_ENABLED) {
415 /*
416 * the add_interface code must make sure we only ever
417 * have a single interface that could be beaconing at
418 * any time.
419 */
420 if (vif->bss_conf.enable_beacon)
421 priv->beacon_ctx = ctx;
422 else
423 priv->beacon_ctx = NULL;
424 }
425
426 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
427 dev_kfree_skb(priv->beacon_skb);
428 priv->beacon_skb = ieee80211_beacon_get(hw, vif);
429 }
430
431 if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
432 iwl_send_rxon_timing(priv, ctx);
433
434 if (changes & BSS_CHANGED_BSSID) {
435 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
436
437 /*
438 * If there is currently a HW scan going on in the
439 * background then we need to cancel it else the RXON
440 * below/in post_associate will fail.
441 */
442 if (iwl_scan_cancel_timeout(priv, 100)) {
443 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
444 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
445 mutex_unlock(&priv->mutex);
446 return;
447 }
448
449 /* mac80211 only sets assoc when in STATION mode */
450 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
451 memcpy(ctx->staging.bssid_addr,
452 bss_conf->bssid, ETH_ALEN);
453
454 /* currently needed in a few places */
455 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
456 } else {
457 ctx->staging.filter_flags &=
458 ~RXON_FILTER_ASSOC_MSK;
459 }
460
461 }
462
463 /*
464 * This needs to be after setting the BSSID in case
465 * mac80211 decides to do both changes at once because
466 * it will invoke post_associate.
467 */
468 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
469 iwlcore_beacon_update(hw, vif);
470
471 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
472 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
473 bss_conf->use_short_preamble);
474 if (bss_conf->use_short_preamble)
475 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
476 else
477 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
478 }
479
480 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
481 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
482 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
483 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
484 else
485 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
486 if (bss_conf->use_cts_prot)
487 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
488 else
489 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
490 }
491
492 if (changes & BSS_CHANGED_BASIC_RATES) {
493 /* XXX use this information
494 *
495 * To do that, remove code from iwl_set_rate() and put something
496 * like this here:
497 *
498 if (A-band)
499 ctx->staging.ofdm_basic_rates =
500 bss_conf->basic_rates;
501 else
502 ctx->staging.ofdm_basic_rates =
503 bss_conf->basic_rates >> 4;
504 ctx->staging.cck_basic_rates =
505 bss_conf->basic_rates & 0xF;
506 */
507 }
508
509 if (changes & BSS_CHANGED_HT) {
510 iwl_ht_conf(priv, vif);
511
512 if (priv->cfg->ops->hcmd->set_rxon_chain)
513 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
514 }
515
516 if (changes & BSS_CHANGED_ASSOC) {
517 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
518 if (bss_conf->assoc) {
519 priv->timestamp = bss_conf->timestamp;
520
521 if (!iwl_is_rfkill(priv))
522 priv->cfg->ops->legacy->post_associate(priv);
523 } else
524 iwl_set_no_assoc(priv, vif);
525 }
526
527 if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
528 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
529 changes);
530 ret = iwl_send_rxon_assoc(priv, ctx);
531 if (!ret) {
532 /* Sync active_rxon with latest change. */
533 memcpy((void *)&ctx->active,
534 &ctx->staging,
535 sizeof(struct iwl_rxon_cmd));
536 }
537 }
538
539 if (changes & BSS_CHANGED_BEACON_ENABLED) {
540 if (vif->bss_conf.enable_beacon) {
541 memcpy(ctx->staging.bssid_addr,
542 bss_conf->bssid, ETH_ALEN);
543 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
544 priv->cfg->ops->legacy->config_ap(priv);
545 } else
546 iwl_set_no_assoc(priv, vif);
547 }
548
549 if (changes & BSS_CHANGED_IBSS) {
550 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
551 bss_conf->ibss_joined);
552 if (ret)
553 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
554 bss_conf->ibss_joined ? "add" : "remove",
555 bss_conf->bssid);
556 }
557
558 mutex_unlock(&priv->mutex);
559
560 IWL_DEBUG_MAC80211(priv, "leave\n");
561}
562EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
563
564irqreturn_t iwl_isr_legacy(int irq, void *data)
565{
566 struct iwl_priv *priv = data;
567 u32 inta, inta_mask;
568 u32 inta_fh;
569 unsigned long flags;
570 if (!priv)
571 return IRQ_NONE;
572
573 spin_lock_irqsave(&priv->lock, flags);
574
575 /* Disable (but don't clear!) interrupts here to avoid
576 * back-to-back ISRs and sporadic interrupts from our NIC.
577 * If we have something to service, the tasklet will re-enable ints.
578 * If we *don't* have something, we'll re-enable before leaving here. */
579 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
580 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
581
582 /* Discover which interrupts are active/pending */
583 inta = iwl_read32(priv, CSR_INT);
584 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
585
586 /* Ignore interrupt if there's nothing in NIC to service.
587 * This may be due to IRQ shared with another device,
588 * or due to sporadic interrupts thrown from our NIC. */
589 if (!inta && !inta_fh) {
590 IWL_DEBUG_ISR(priv,
591 "Ignore interrupt, inta == 0, inta_fh == 0\n");
592 goto none;
593 }
594
595 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
596 /* Hardware disappeared. It might have already raised
597 * an interrupt */
598 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
599 goto unplugged;
600 }
601
602 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
603 inta, inta_mask, inta_fh);
604
605 inta &= ~CSR_INT_BIT_SCD;
606
607 /* iwl_irq_tasklet() will service interrupts and re-enable them */
608 if (likely(inta || inta_fh))
609 tasklet_schedule(&priv->irq_tasklet);
610
611unplugged:
612 spin_unlock_irqrestore(&priv->lock, flags);
613 return IRQ_HANDLED;
614
615none:
616 /* re-enable interrupts here since we don't have anything to service. */
617 /* only Re-enable if disabled by irq */
618 if (test_bit(STATUS_INT_ENABLED, &priv->status))
619 iwl_enable_interrupts(priv);
620 spin_unlock_irqrestore(&priv->lock, flags);
621 return IRQ_NONE;
622}
623EXPORT_SYMBOL(iwl_isr_legacy);
624
625/*
626 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
627 * function.
628 */
629void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
630 struct ieee80211_tx_info *info,
631 __le16 fc, __le32 *tx_flags)
632{
633 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
634 *tx_flags |= TX_CMD_FLG_RTS_MSK;
635 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
636 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
637
638 if (!ieee80211_is_mgmt(fc))
639 return;
640
641 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
642 case cpu_to_le16(IEEE80211_STYPE_AUTH):
643 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
644 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
645 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
646 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
647 *tx_flags |= TX_CMD_FLG_CTS_MSK;
648 break;
649 }
650 } else if (info->control.rates[0].flags &
651 IEEE80211_TX_RC_USE_CTS_PROTECT) {
652 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
653 *tx_flags |= TX_CMD_FLG_CTS_MSK;
654 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
655 }
656}
657EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 1d1bf3234d8d..576795e2c75b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -425,7 +425,6 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
425 425
426 return ret; 426 return ret;
427} 427}
428EXPORT_SYMBOL(iwl_power_set_mode);
429 428
430int iwl_power_update_mode(struct iwl_priv *priv, bool force) 429int iwl_power_update_mode(struct iwl_priv *priv, bool force)
431{ 430{
@@ -434,7 +433,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
434 iwl_power_build_cmd(priv, &cmd); 433 iwl_power_build_cmd(priv, &cmd);
435 return iwl_power_set_mode(priv, &cmd, force); 434 return iwl_power_set_mode(priv, &cmd, force);
436} 435}
437EXPORT_SYMBOL(iwl_power_update_mode);
438 436
439/* initialize to default */ 437/* initialize to default */
440void iwl_power_initialize(struct iwl_priv *priv) 438void iwl_power_initialize(struct iwl_priv *priv)
@@ -448,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv)
448 memset(&priv->power_data.sleep_cmd, 0, 446 memset(&priv->power_data.sleep_cmd, 0,
449 sizeof(priv->power_data.sleep_cmd)); 447 sizeof(priv->power_data.sleep_cmd));
450} 448}
451EXPORT_SYMBOL(iwl_power_initialize);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index bc89393fb696..566e2d979ce3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -37,6 +37,7 @@
37#include "iwl-sta.h" 37#include "iwl-sta.h"
38#include "iwl-io.h" 38#include "iwl-io.h"
39#include "iwl-helpers.h" 39#include "iwl-helpers.h"
40#include "iwl-agn-calib.h"
40/************************** RX-FUNCTIONS ****************************/ 41/************************** RX-FUNCTIONS ****************************/
41/* 42/*
42 * Rx theory of operation 43 * Rx theory of operation
@@ -118,7 +119,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q)
118 s = 0; 119 s = 0;
119 return s; 120 return s;
120} 121}
121EXPORT_SYMBOL(iwl_rx_queue_space);
122 122
123/** 123/**
124 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue 124 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
@@ -170,7 +170,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
170 exit_unlock: 170 exit_unlock:
171 spin_unlock_irqrestore(&q->lock, flags); 171 spin_unlock_irqrestore(&q->lock, flags);
172} 172}
173EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
174 173
175int iwl_rx_queue_alloc(struct iwl_priv *priv) 174int iwl_rx_queue_alloc(struct iwl_priv *priv)
176{ 175{
@@ -211,8 +210,6 @@ err_rb:
211err_bd: 210err_bd:
212 return -ENOMEM; 211 return -ENOMEM;
213} 212}
214EXPORT_SYMBOL(iwl_rx_queue_alloc);
215
216 213
217void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 214void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
218 struct iwl_rx_mem_buffer *rxb) 215 struct iwl_rx_mem_buffer *rxb)
@@ -229,27 +226,397 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
229 memcpy(&priv->measure_report, report, sizeof(*report)); 226 memcpy(&priv->measure_report, report, sizeof(*report));
230 priv->measurement_status |= MEASUREMENT_READY; 227 priv->measurement_status |= MEASUREMENT_READY;
231} 228}
232EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
233 229
234void iwl_recover_from_statistics(struct iwl_priv *priv, 230/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
235 struct iwl_rx_packet *pkt) 231#define ACK_CNT_RATIO (50)
232#define BA_TIMEOUT_CNT (5)
233#define BA_TIMEOUT_MAX (16)
234
235/**
236 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
237 *
238 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
239 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
240 * operation state.
241 */
242static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
243{
244 int actual_delta, expected_delta, ba_timeout_delta;
245 struct statistics_tx *cur, *old;
246
247 if (priv->_agn.agg_tids_count)
248 return true;
249
250 if (iwl_bt_statistics(priv)) {
251 cur = &pkt->u.stats_bt.tx;
252 old = &priv->_agn.statistics_bt.tx;
253 } else {
254 cur = &pkt->u.stats.tx;
255 old = &priv->_agn.statistics.tx;
256 }
257
258 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
259 le32_to_cpu(old->actual_ack_cnt);
260 expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
261 le32_to_cpu(old->expected_ack_cnt);
262
263 /* Values should not be negative, but we do not trust the firmware */
264 if (actual_delta <= 0 || expected_delta <= 0)
265 return true;
266
267 ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
268 le32_to_cpu(old->agg.ba_timeout);
269
270 if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
271 ba_timeout_delta > BA_TIMEOUT_CNT) {
272 IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
273 actual_delta, expected_delta, ba_timeout_delta);
274
275#ifdef CONFIG_IWLWIFI_DEBUGFS
276 /*
277 * This is ifdef'ed on DEBUGFS because otherwise the
278 * statistics aren't available. If DEBUGFS is set but
279 * DEBUG is not, these will just compile out.
280 */
281 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
282 priv->_agn.delta_statistics.tx.rx_detected_cnt);
283 IWL_DEBUG_RADIO(priv,
284 "ack_or_ba_timeout_collision delta %d\n",
285 priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
286#endif
287
288 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
289 return false;
290 }
291
292 return true;
293}
294
295/**
296 * iwl_good_plcp_health - checks for plcp error.
297 *
298 * When the plcp error is exceeding the thresholds, reset the radio
299 * to improve the throughput.
300 */
301static bool iwl_good_plcp_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
236{ 302{
303 bool rc = true;
304 int combined_plcp_delta;
305 unsigned int plcp_msec;
306 unsigned long plcp_received_jiffies;
307
308 if (priv->cfg->base_params->plcp_delta_threshold ==
309 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
310 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
311 return rc;
312 }
313
314 /*
315 * check for plcp_err and trigger radio reset if it exceeds
316 * the plcp error threshold plcp_delta.
317 */
318 plcp_received_jiffies = jiffies;
319 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
320 (long) priv->plcp_jiffies);
321 priv->plcp_jiffies = plcp_received_jiffies;
322 /*
323 * check to make sure plcp_msec is not 0 to prevent division
324 * by zero.
325 */
326 if (plcp_msec) {
327 struct statistics_rx_phy *ofdm;
328 struct statistics_rx_ht_phy *ofdm_ht;
329
330 if (iwl_bt_statistics(priv)) {
331 ofdm = &pkt->u.stats_bt.rx.ofdm;
332 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
333 combined_plcp_delta =
334 (le32_to_cpu(ofdm->plcp_err) -
335 le32_to_cpu(priv->_agn.statistics_bt.
336 rx.ofdm.plcp_err)) +
337 (le32_to_cpu(ofdm_ht->plcp_err) -
338 le32_to_cpu(priv->_agn.statistics_bt.
339 rx.ofdm_ht.plcp_err));
340 } else {
341 ofdm = &pkt->u.stats.rx.ofdm;
342 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
343 combined_plcp_delta =
344 (le32_to_cpu(ofdm->plcp_err) -
345 le32_to_cpu(priv->_agn.statistics.
346 rx.ofdm.plcp_err)) +
347 (le32_to_cpu(ofdm_ht->plcp_err) -
348 le32_to_cpu(priv->_agn.statistics.
349 rx.ofdm_ht.plcp_err));
350 }
351
352 if ((combined_plcp_delta > 0) &&
353 ((combined_plcp_delta * 100) / plcp_msec) >
354 priv->cfg->base_params->plcp_delta_threshold) {
355 /*
356 * if plcp_err exceed the threshold,
357 * the following data is printed in csv format:
358 * Text: plcp_err exceeded %d,
359 * Received ofdm.plcp_err,
360 * Current ofdm.plcp_err,
361 * Received ofdm_ht.plcp_err,
362 * Current ofdm_ht.plcp_err,
363 * combined_plcp_delta,
364 * plcp_msec
365 */
366 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
367 "%u, %u, %u, %u, %d, %u mSecs\n",
368 priv->cfg->base_params->plcp_delta_threshold,
369 le32_to_cpu(ofdm->plcp_err),
370 le32_to_cpu(ofdm->plcp_err),
371 le32_to_cpu(ofdm_ht->plcp_err),
372 le32_to_cpu(ofdm_ht->plcp_err),
373 combined_plcp_delta, plcp_msec);
374
375 rc = false;
376 }
377 }
378 return rc;
379}
380
381static void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
382{
383 const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
384
237 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 385 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
238 !iwl_is_any_associated(priv)) 386 !iwl_is_any_associated(priv))
239 return; 387 return;
240 388
241 if (priv->cfg->ops->lib->check_ack_health && 389 if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
242 !priv->cfg->ops->lib->check_ack_health(priv, pkt)) {
243 IWL_ERR(priv, "low ack count detected, restart firmware\n"); 390 IWL_ERR(priv, "low ack count detected, restart firmware\n");
244 if (!iwl_force_reset(priv, IWL_FW_RESET, false)) 391 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
245 return; 392 return;
246 } 393 }
247 394
248 if (priv->cfg->ops->lib->check_plcp_health && 395 if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt))
249 !priv->cfg->ops->lib->check_plcp_health(priv, pkt))
250 iwl_force_reset(priv, IWL_RF_RESET, false); 396 iwl_force_reset(priv, IWL_RF_RESET, false);
251} 397}
252EXPORT_SYMBOL(iwl_recover_from_statistics); 398
399/* Calculate noise level, based on measurements during network silence just
400 * before arriving beacon. This measurement can be done only if we know
401 * exactly when to expect beacons, therefore only when we're associated. */
402static void iwl_rx_calc_noise(struct iwl_priv *priv)
403{
404 struct statistics_rx_non_phy *rx_info;
405 int num_active_rx = 0;
406 int total_silence = 0;
407 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
408 int last_rx_noise;
409
410 if (iwl_bt_statistics(priv))
411 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
412 else
413 rx_info = &(priv->_agn.statistics.rx.general);
414 bcn_silence_a =
415 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
416 bcn_silence_b =
417 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
418 bcn_silence_c =
419 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
420
421 if (bcn_silence_a) {
422 total_silence += bcn_silence_a;
423 num_active_rx++;
424 }
425 if (bcn_silence_b) {
426 total_silence += bcn_silence_b;
427 num_active_rx++;
428 }
429 if (bcn_silence_c) {
430 total_silence += bcn_silence_c;
431 num_active_rx++;
432 }
433
434 /* Average among active antennas */
435 if (num_active_rx)
436 last_rx_noise = (total_silence / num_active_rx) - 107;
437 else
438 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
439
440 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
441 bcn_silence_a, bcn_silence_b, bcn_silence_c,
442 last_rx_noise);
443}
444
445#ifdef CONFIG_IWLWIFI_DEBUGFS
446/*
447 * based on the assumption of all statistics counter are in DWORD
448 * FIXME: This function is for debugging, do not deal with
449 * the case of counters roll-over.
450 */
451static void iwl_accumulative_statistics(struct iwl_priv *priv,
452 __le32 *stats)
453{
454 int i, size;
455 __le32 *prev_stats;
456 u32 *accum_stats;
457 u32 *delta, *max_delta;
458 struct statistics_general_common *general, *accum_general;
459 struct statistics_tx *tx, *accum_tx;
460
461 if (iwl_bt_statistics(priv)) {
462 prev_stats = (__le32 *)&priv->_agn.statistics_bt;
463 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
464 size = sizeof(struct iwl_bt_notif_statistics);
465 general = &priv->_agn.statistics_bt.general.common;
466 accum_general = &priv->_agn.accum_statistics_bt.general.common;
467 tx = &priv->_agn.statistics_bt.tx;
468 accum_tx = &priv->_agn.accum_statistics_bt.tx;
469 delta = (u32 *)&priv->_agn.delta_statistics_bt;
470 max_delta = (u32 *)&priv->_agn.max_delta_bt;
471 } else {
472 prev_stats = (__le32 *)&priv->_agn.statistics;
473 accum_stats = (u32 *)&priv->_agn.accum_statistics;
474 size = sizeof(struct iwl_notif_statistics);
475 general = &priv->_agn.statistics.general.common;
476 accum_general = &priv->_agn.accum_statistics.general.common;
477 tx = &priv->_agn.statistics.tx;
478 accum_tx = &priv->_agn.accum_statistics.tx;
479 delta = (u32 *)&priv->_agn.delta_statistics;
480 max_delta = (u32 *)&priv->_agn.max_delta;
481 }
482 for (i = sizeof(__le32); i < size;
483 i += sizeof(__le32), stats++, prev_stats++, delta++,
484 max_delta++, accum_stats++) {
485 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
486 *delta = (le32_to_cpu(*stats) -
487 le32_to_cpu(*prev_stats));
488 *accum_stats += *delta;
489 if (*delta > *max_delta)
490 *max_delta = *delta;
491 }
492 }
493
494 /* reset accumulative statistics for "no-counter" type statistics */
495 accum_general->temperature = general->temperature;
496 accum_general->temperature_m = general->temperature_m;
497 accum_general->ttl_timestamp = general->ttl_timestamp;
498 accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
499 accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
500 accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
501}
502#endif
503
504#define REG_RECALIB_PERIOD (60)
505
506void iwl_rx_statistics(struct iwl_priv *priv,
507 struct iwl_rx_mem_buffer *rxb)
508{
509 int change;
510 struct iwl_rx_packet *pkt = rxb_addr(rxb);
511
512 if (iwl_bt_statistics(priv)) {
513 IWL_DEBUG_RX(priv,
514 "Statistics notification received (%d vs %d).\n",
515 (int)sizeof(struct iwl_bt_notif_statistics),
516 le32_to_cpu(pkt->len_n_flags) &
517 FH_RSCSR_FRAME_SIZE_MSK);
518
519 change = ((priv->_agn.statistics_bt.general.common.temperature !=
520 pkt->u.stats_bt.general.common.temperature) ||
521 ((priv->_agn.statistics_bt.flag &
522 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
523 (pkt->u.stats_bt.flag &
524 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
525#ifdef CONFIG_IWLWIFI_DEBUGFS
526 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
527#endif
528
529 } else {
530 IWL_DEBUG_RX(priv,
531 "Statistics notification received (%d vs %d).\n",
532 (int)sizeof(struct iwl_notif_statistics),
533 le32_to_cpu(pkt->len_n_flags) &
534 FH_RSCSR_FRAME_SIZE_MSK);
535
536 change = ((priv->_agn.statistics.general.common.temperature !=
537 pkt->u.stats.general.common.temperature) ||
538 ((priv->_agn.statistics.flag &
539 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
540 (pkt->u.stats.flag &
541 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
542#ifdef CONFIG_IWLWIFI_DEBUGFS
543 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
544#endif
545
546 }
547
548 iwl_recover_from_statistics(priv, pkt);
549
550 if (iwl_bt_statistics(priv))
551 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
552 sizeof(priv->_agn.statistics_bt));
553 else
554 memcpy(&priv->_agn.statistics, &pkt->u.stats,
555 sizeof(priv->_agn.statistics));
556
557 set_bit(STATUS_STATISTICS, &priv->status);
558
559 /* Reschedule the statistics timer to occur in
560 * REG_RECALIB_PERIOD seconds to ensure we get a
561 * thermal update even if the uCode doesn't give
562 * us one */
563 mod_timer(&priv->statistics_periodic, jiffies +
564 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
565
566 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
567 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
568 iwl_rx_calc_noise(priv);
569 queue_work(priv->workqueue, &priv->run_time_calib_work);
570 }
571 if (priv->cfg->ops->lib->temp_ops.temperature && change)
572 priv->cfg->ops->lib->temp_ops.temperature(priv);
573}
574
575void iwl_reply_statistics(struct iwl_priv *priv,
576 struct iwl_rx_mem_buffer *rxb)
577{
578 struct iwl_rx_packet *pkt = rxb_addr(rxb);
579
580 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
581#ifdef CONFIG_IWLWIFI_DEBUGFS
582 memset(&priv->_agn.accum_statistics, 0,
583 sizeof(struct iwl_notif_statistics));
584 memset(&priv->_agn.delta_statistics, 0,
585 sizeof(struct iwl_notif_statistics));
586 memset(&priv->_agn.max_delta, 0,
587 sizeof(struct iwl_notif_statistics));
588 memset(&priv->_agn.accum_statistics_bt, 0,
589 sizeof(struct iwl_bt_notif_statistics));
590 memset(&priv->_agn.delta_statistics_bt, 0,
591 sizeof(struct iwl_bt_notif_statistics));
592 memset(&priv->_agn.max_delta_bt, 0,
593 sizeof(struct iwl_bt_notif_statistics));
594#endif
595 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
596 }
597 iwl_rx_statistics(priv, rxb);
598}
599
600void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
601 struct iwl_rx_mem_buffer *rxb)
602
603{
604 struct iwl_rx_packet *pkt = rxb_addr(rxb);
605 struct iwl_missed_beacon_notif *missed_beacon;
606
607 missed_beacon = &pkt->u.missed_beacon;
608 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
609 priv->missed_beacon_threshold) {
610 IWL_DEBUG_CALIB(priv,
611 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
612 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
613 le32_to_cpu(missed_beacon->total_missed_becons),
614 le32_to_cpu(missed_beacon->num_recvd_beacons),
615 le32_to_cpu(missed_beacon->num_expected_beacons));
616 if (!test_bit(STATUS_SCANNING, &priv->status))
617 iwl_init_sensitivity(priv);
618 }
619}
253 620
254/* 621/*
255 * returns non-zero if packet should be dropped 622 * returns non-zero if packet should be dropped
@@ -302,4 +669,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
302 } 669 }
303 return 0; 670 return 0;
304} 671}
305EXPORT_SYMBOL(iwl_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 08f1bea8b652..faa6d34cb658 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv)
155 queue_work(priv->workqueue, &priv->abort_scan); 155 queue_work(priv->workqueue, &priv->abort_scan);
156 return 0; 156 return 0;
157} 157}
158EXPORT_SYMBOL(iwl_scan_cancel);
159 158
160/** 159/**
161 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan 160 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
@@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
180 179
181 return test_bit(STATUS_SCAN_HW, &priv->status); 180 return test_bit(STATUS_SCAN_HW, &priv->status);
182} 181}
183EXPORT_SYMBOL(iwl_scan_cancel_timeout);
184 182
185/* Service response to REPLY_SCAN_CMD (0x80) */ 183/* Service response to REPLY_SCAN_CMD (0x80) */
186static void iwl_rx_reply_scan(struct iwl_priv *priv, 184static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -288,7 +286,6 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
288 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = 286 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
289 iwl_rx_scan_complete_notif; 287 iwl_rx_scan_complete_notif;
290} 288}
291EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
292 289
293inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, 290inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
294 enum ieee80211_band band, 291 enum ieee80211_band band,
@@ -301,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
301 return IWL_ACTIVE_DWELL_TIME_24 + 298 return IWL_ACTIVE_DWELL_TIME_24 +
302 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); 299 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
303} 300}
304EXPORT_SYMBOL(iwl_get_active_dwell_time);
305 301
306u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, 302u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
307 enum ieee80211_band band, 303 enum ieee80211_band band,
@@ -333,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
333 329
334 return passive; 330 return passive;
335} 331}
336EXPORT_SYMBOL(iwl_get_passive_dwell_time);
337 332
338void iwl_init_scan_params(struct iwl_priv *priv) 333void iwl_init_scan_params(struct iwl_priv *priv)
339{ 334{
@@ -343,7 +338,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
343 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 338 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
344 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; 339 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
345} 340}
346EXPORT_SYMBOL(iwl_init_scan_params);
347 341
348static int __must_check iwl_scan_initiate(struct iwl_priv *priv, 342static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
349 struct ieee80211_vif *vif, 343 struct ieee80211_vif *vif,
@@ -439,7 +433,6 @@ out_unlock:
439 433
440 return ret; 434 return ret;
441} 435}
442EXPORT_SYMBOL(iwl_mac_hw_scan);
443 436
444/* 437/*
445 * internal short scan, this function should only been called while associated. 438 * internal short scan, this function should only been called while associated.
@@ -536,7 +529,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
536 529
537 return (u16)len; 530 return (u16)len;
538} 531}
539EXPORT_SYMBOL(iwl_fill_probe_req);
540 532
541static void iwl_bg_abort_scan(struct work_struct *work) 533static void iwl_bg_abort_scan(struct work_struct *work)
542{ 534{
@@ -621,7 +613,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
621 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); 613 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
622 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 614 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
623} 615}
624EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
625 616
626void iwl_cancel_scan_deferred_work(struct iwl_priv *priv) 617void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
627{ 618{
@@ -635,4 +626,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
635 mutex_unlock(&priv->mutex); 626 mutex_unlock(&priv->mutex);
636 } 627 }
637} 628}
638EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 49493d176515..bc90a12408a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
169 169
170 return ret; 170 return ret;
171} 171}
172EXPORT_SYMBOL(iwl_send_add_sta);
173 172
174static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, 173static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
175 struct ieee80211_sta *sta, 174 struct ieee80211_sta *sta,
@@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
316 return sta_id; 315 return sta_id;
317 316
318} 317}
319EXPORT_SYMBOL_GPL(iwl_prep_station);
320 318
321#define STA_WAIT_TIMEOUT (HZ/2) 319#define STA_WAIT_TIMEOUT (HZ/2)
322 320
@@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
379 *sta_id_r = sta_id; 377 *sta_id_r = sta_id;
380 return ret; 378 return ret;
381} 379}
382EXPORT_SYMBOL(iwl_add_station_common);
383 380
384/** 381/**
385 * iwl_sta_ucode_deactivate - deactivate ucode status for a station 382 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
@@ -513,7 +510,6 @@ out_err:
513 spin_unlock_irqrestore(&priv->sta_lock, flags); 510 spin_unlock_irqrestore(&priv->sta_lock, flags);
514 return -EINVAL; 511 return -EINVAL;
515} 512}
516EXPORT_SYMBOL_GPL(iwl_remove_station);
517 513
518/** 514/**
519 * iwl_clear_ucode_stations - clear ucode station table bits 515 * iwl_clear_ucode_stations - clear ucode station table bits
@@ -548,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
548 if (!cleared) 544 if (!cleared)
549 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n"); 545 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
550} 546}
551EXPORT_SYMBOL(iwl_clear_ucode_stations);
552 547
553/** 548/**
554 * iwl_restore_stations() - Restore driver known stations to device 549 * iwl_restore_stations() - Restore driver known stations to device
@@ -625,7 +620,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
625 else 620 else
626 IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n"); 621 IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
627} 622}
628EXPORT_SYMBOL(iwl_restore_stations);
629 623
630void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 624void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
631{ 625{
@@ -668,7 +662,6 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
668 priv->stations[sta_id].sta.sta.addr, ret); 662 priv->stations[sta_id].sta.sta.addr, ret);
669 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); 663 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
670} 664}
671EXPORT_SYMBOL(iwl_reprogram_ap_sta);
672 665
673int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 666int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
674{ 667{
@@ -680,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
680 673
681 return WEP_INVALID_OFFSET; 674 return WEP_INVALID_OFFSET;
682} 675}
683EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
684 676
685void iwl_dealloc_bcast_stations(struct iwl_priv *priv) 677void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
686{ 678{
@@ -700,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
700 } 692 }
701 spin_unlock_irqrestore(&priv->sta_lock, flags); 693 spin_unlock_irqrestore(&priv->sta_lock, flags);
702} 694}
703EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
704 695
705#ifdef CONFIG_IWLWIFI_DEBUG 696#ifdef CONFIG_IWLWIFI_DEBUG
706static void iwl_dump_lq_cmd(struct iwl_priv *priv, 697static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -810,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
810 } 801 }
811 return ret; 802 return ret;
812} 803}
813EXPORT_SYMBOL(iwl_send_lq_cmd);
814 804
815int iwl_mac_sta_remove(struct ieee80211_hw *hw, 805int iwl_mac_sta_remove(struct ieee80211_hw *hw,
816 struct ieee80211_vif *vif, 806 struct ieee80211_vif *vif,
@@ -832,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
832 mutex_unlock(&priv->mutex); 822 mutex_unlock(&priv->mutex);
833 return ret; 823 return ret;
834} 824}
835EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 073b6ce6141c..277c9175dcf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -84,7 +84,23 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
84 } 84 }
85 txq->need_update = 0; 85 txq->need_update = 0;
86} 86}
87EXPORT_SYMBOL(iwl_txq_update_write_ptr); 87
88/**
89 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
90 */
91void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
92{
93 struct iwl_tx_queue *txq = &priv->txq[txq_id];
94 struct iwl_queue *q = &txq->q;
95
96 if (q->n_bd == 0)
97 return;
98
99 while (q->write_ptr != q->read_ptr) {
100 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
101 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
102 }
103}
88 104
89/** 105/**
90 * iwl_tx_queue_free - Deallocate DMA queue. 106 * iwl_tx_queue_free - Deallocate DMA queue.
@@ -97,17 +113,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
97void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) 113void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
98{ 114{
99 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 115 struct iwl_tx_queue *txq = &priv->txq[txq_id];
100 struct iwl_queue *q = &txq->q;
101 struct device *dev = &priv->pci_dev->dev; 116 struct device *dev = &priv->pci_dev->dev;
102 int i; 117 int i;
103 118
104 if (q->n_bd == 0) 119 iwl_tx_queue_unmap(priv, txq_id);
105 return;
106
107 /* first, empty all BD's */
108 for (; q->write_ptr != q->read_ptr;
109 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
110 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
111 120
112 /* De-alloc array of command/tx buffers */ 121 /* De-alloc array of command/tx buffers */
113 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 122 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -131,42 +140,35 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
131 /* 0-fill queue descriptor structure */ 140 /* 0-fill queue descriptor structure */
132 memset(txq, 0, sizeof(*txq)); 141 memset(txq, 0, sizeof(*txq));
133} 142}
134EXPORT_SYMBOL(iwl_tx_queue_free);
135 143
136/** 144/**
137 * iwl_cmd_queue_free - Deallocate DMA queue. 145 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
138 * @txq: Transmit queue to deallocate.
139 *
140 * Empty queue by removing and destroying all BD's.
141 * Free all buffers.
142 * 0-fill, but do not free "txq" descriptor structure.
143 */ 146 */
144void iwl_cmd_queue_free(struct iwl_priv *priv) 147void iwl_cmd_queue_unmap(struct iwl_priv *priv)
145{ 148{
146 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 149 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
147 struct iwl_queue *q = &txq->q; 150 struct iwl_queue *q = &txq->q;
148 struct device *dev = &priv->pci_dev->dev;
149 int i; 151 int i;
150 bool huge = false; 152 bool huge = false;
151 153
152 if (q->n_bd == 0) 154 if (q->n_bd == 0)
153 return; 155 return;
154 156
155 for (; q->read_ptr != q->write_ptr; 157 while (q->read_ptr != q->write_ptr) {
156 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
157 /* we have no way to tell if it is a huge cmd ATM */ 158 /* we have no way to tell if it is a huge cmd ATM */
158 i = get_cmd_index(q, q->read_ptr, 0); 159 i = get_cmd_index(q, q->read_ptr, 0);
159 160
160 if (txq->meta[i].flags & CMD_SIZE_HUGE) { 161 if (txq->meta[i].flags & CMD_SIZE_HUGE)
161 huge = true; 162 huge = true;
162 continue; 163 else
163 } 164 pci_unmap_single(priv->pci_dev,
165 dma_unmap_addr(&txq->meta[i], mapping),
166 dma_unmap_len(&txq->meta[i], len),
167 PCI_DMA_BIDIRECTIONAL);
164 168
165 pci_unmap_single(priv->pci_dev, 169 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
166 dma_unmap_addr(&txq->meta[i], mapping),
167 dma_unmap_len(&txq->meta[i], len),
168 PCI_DMA_BIDIRECTIONAL);
169 } 170 }
171
170 if (huge) { 172 if (huge) {
171 i = q->n_window; 173 i = q->n_window;
172 pci_unmap_single(priv->pci_dev, 174 pci_unmap_single(priv->pci_dev,
@@ -174,6 +176,23 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
174 dma_unmap_len(&txq->meta[i], len), 176 dma_unmap_len(&txq->meta[i], len),
175 PCI_DMA_BIDIRECTIONAL); 177 PCI_DMA_BIDIRECTIONAL);
176 } 178 }
179}
180
181/**
182 * iwl_cmd_queue_free - Deallocate DMA queue.
183 * @txq: Transmit queue to deallocate.
184 *
185 * Empty queue by removing and destroying all BD's.
186 * Free all buffers.
187 * 0-fill, but do not free "txq" descriptor structure.
188 */
189void iwl_cmd_queue_free(struct iwl_priv *priv)
190{
191 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
192 struct device *dev = &priv->pci_dev->dev;
193 int i;
194
195 iwl_cmd_queue_unmap(priv);
177 196
178 /* De-alloc array of command/tx buffers */ 197 /* De-alloc array of command/tx buffers */
179 for (i = 0; i <= TFD_CMD_SLOTS; i++) 198 for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -193,7 +212,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
193 /* 0-fill queue descriptor structure */ 212 /* 0-fill queue descriptor structure */
194 memset(txq, 0, sizeof(*txq)); 213 memset(txq, 0, sizeof(*txq));
195} 214}
196EXPORT_SYMBOL(iwl_cmd_queue_free);
197 215
198/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 216/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
199 * DMA services 217 * DMA services
@@ -233,7 +251,6 @@ int iwl_queue_space(const struct iwl_queue *q)
233 s = 0; 251 s = 0;
234 return s; 252 return s;
235} 253}
236EXPORT_SYMBOL(iwl_queue_space);
237 254
238 255
239/** 256/**
@@ -384,7 +401,6 @@ out_free_arrays:
384 401
385 return -ENOMEM; 402 return -ENOMEM;
386} 403}
387EXPORT_SYMBOL(iwl_tx_queue_init);
388 404
389void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 405void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
390 int slots_num, u32 txq_id) 406 int slots_num, u32 txq_id)
@@ -404,7 +420,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
404 /* Tell device where to find queue */ 420 /* Tell device where to find queue */
405 priv->cfg->ops->lib->txq_init(priv, txq); 421 priv->cfg->ops->lib->txq_init(priv, txq);
406} 422}
407EXPORT_SYMBOL(iwl_tx_queue_reset);
408 423
409/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 424/*************** HOST COMMAND QUEUE FUNCTIONS *****/
410 425
@@ -641,4 +656,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
641 } 656 }
642 meta->flags = 0; 657 meta->flags = 0;
643} 658}
644EXPORT_SYMBOL(iwl_tx_cmd_complete);
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 9278b3c8ee30..d4005081f1df 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -225,7 +225,7 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
225 lbtf_deb_leave(LBTF_DEB_MAIN); 225 lbtf_deb_leave(LBTF_DEB_MAIN);
226} 226}
227 227
228static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 228static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
229{ 229{
230 struct lbtf_private *priv = hw->priv; 230 struct lbtf_private *priv = hw->priv;
231 231
@@ -236,7 +236,6 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
236 * there are no buffered multicast frames to send 236 * there are no buffered multicast frames to send
237 */ 237 */
238 ieee80211_stop_queues(priv->hw); 238 ieee80211_stop_queues(priv->hw);
239 return NETDEV_TX_OK;
240} 239}
241 240
242static void lbtf_tx_work(struct work_struct *work) 241static void lbtf_tx_work(struct work_struct *work)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 5d39b2840584..56f439d58013 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -541,7 +541,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
541} 541}
542 542
543 543
544static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 544static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
545{ 545{
546 bool ack; 546 bool ack;
547 struct ieee80211_tx_info *txi; 547 struct ieee80211_tx_info *txi;
@@ -551,7 +551,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
551 if (skb->len < 10) { 551 if (skb->len < 10) {
552 /* Should not happen; just a sanity check for addr1 use */ 552 /* Should not happen; just a sanity check for addr1 use */
553 dev_kfree_skb(skb); 553 dev_kfree_skb(skb);
554 return NETDEV_TX_OK; 554 return;
555 } 555 }
556 556
557 ack = mac80211_hwsim_tx_frame(hw, skb); 557 ack = mac80211_hwsim_tx_frame(hw, skb);
@@ -571,7 +571,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
571 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) 571 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
572 txi->flags |= IEEE80211_TX_STAT_ACK; 572 txi->flags |= IEEE80211_TX_STAT_ACK;
573 ieee80211_tx_status_irqsafe(hw, skb); 573 ieee80211_tx_status_irqsafe(hw, skb);
574 return NETDEV_TX_OK;
575} 574}
576 575
577 576
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index af4f2c64f242..df5959f36d0b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1535,6 +1535,13 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1535 1535
1536 info = IEEE80211_SKB_CB(skb); 1536 info = IEEE80211_SKB_CB(skb);
1537 ieee80211_tx_info_clear_status(info); 1537 ieee80211_tx_info_clear_status(info);
1538
1539 /* Rate control is happening in the firmware.
1540 * Ensure no tx rate is being reported.
1541 */
1542 info->status.rates[0].idx = -1;
1543 info->status.rates[0].count = 1;
1544
1538 if (MWL8K_TXD_SUCCESS(status)) 1545 if (MWL8K_TXD_SUCCESS(status))
1539 info->flags |= IEEE80211_TX_STAT_ACK; 1546 info->flags |= IEEE80211_TX_STAT_ACK;
1540 1547
@@ -1566,7 +1573,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1566 txq->txd = NULL; 1573 txq->txd = NULL;
1567} 1574}
1568 1575
1569static int 1576static void
1570mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) 1577mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1571{ 1578{
1572 struct mwl8k_priv *priv = hw->priv; 1579 struct mwl8k_priv *priv = hw->priv;
@@ -1628,7 +1635,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1628 wiphy_debug(hw->wiphy, 1635 wiphy_debug(hw->wiphy,
1629 "failed to dma map skb, dropping TX frame.\n"); 1636 "failed to dma map skb, dropping TX frame.\n");
1630 dev_kfree_skb(skb); 1637 dev_kfree_skb(skb);
1631 return NETDEV_TX_OK; 1638 return;
1632 } 1639 }
1633 1640
1634 spin_lock_bh(&priv->tx_lock); 1641 spin_lock_bh(&priv->tx_lock);
@@ -1665,8 +1672,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1665 mwl8k_tx_start(priv); 1672 mwl8k_tx_start(priv);
1666 1673
1667 spin_unlock_bh(&priv->tx_lock); 1674 spin_unlock_bh(&priv->tx_lock);
1668
1669 return NETDEV_TX_OK;
1670} 1675}
1671 1676
1672 1677
@@ -2121,8 +2126,18 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
2121 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); 2126 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
2122 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); 2127 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
2123 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 2128 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
2124 for (i = 0; i < MWL8K_TX_QUEUES; i++) 2129
2125 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); 2130 /*
2131 * Mac80211 stack has Q0 as highest priority and Q3 as lowest in
2132 * that order. Firmware has Q3 as highest priority and Q0 as lowest
2133 * in that order. Map Q3 of mac80211 to Q0 of firmware so that the
2134 * priority is interpreted the right way in firmware.
2135 */
2136 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
2137 int j = MWL8K_TX_QUEUES - 1 - i;
2138 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
2139 }
2140
2126 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | 2141 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
2127 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | 2142 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
2128 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON); 2143 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
@@ -3725,22 +3740,19 @@ static void mwl8k_rx_poll(unsigned long data)
3725/* 3740/*
3726 * Core driver operations. 3741 * Core driver operations.
3727 */ 3742 */
3728static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3743static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3729{ 3744{
3730 struct mwl8k_priv *priv = hw->priv; 3745 struct mwl8k_priv *priv = hw->priv;
3731 int index = skb_get_queue_mapping(skb); 3746 int index = skb_get_queue_mapping(skb);
3732 int rc;
3733 3747
3734 if (!priv->radio_on) { 3748 if (!priv->radio_on) {
3735 wiphy_debug(hw->wiphy, 3749 wiphy_debug(hw->wiphy,
3736 "dropped TX frame since radio disabled\n"); 3750 "dropped TX frame since radio disabled\n");
3737 dev_kfree_skb(skb); 3751 dev_kfree_skb(skb);
3738 return NETDEV_TX_OK; 3752 return;
3739 } 3753 }
3740 3754
3741 rc = mwl8k_txq_xmit(hw, index, skb); 3755 mwl8k_txq_xmit(hw, index, skb);
3742
3743 return rc;
3744} 3756}
3745 3757
3746static int mwl8k_start(struct ieee80211_hw *hw) 3758static int mwl8k_start(struct ieee80211_hw *hw)
@@ -3945,9 +3957,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
3945 if (rc) 3957 if (rc)
3946 goto out; 3958 goto out;
3947 3959
3948 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7); 3960 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
3949 if (!rc) 3961 if (rc)
3950 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); 3962 wiphy_warn(hw->wiphy, "failed to set # of RX antennas");
3963 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
3964 if (rc)
3965 wiphy_warn(hw->wiphy, "failed to set # of TX antennas");
3966
3951 } else { 3967 } else {
3952 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level); 3968 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
3953 if (rc) 3969 if (rc)
@@ -4320,12 +4336,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
4320 if (!priv->wmm_enabled) 4336 if (!priv->wmm_enabled)
4321 rc = mwl8k_cmd_set_wmm_mode(hw, 1); 4337 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
4322 4338
4323 if (!rc) 4339 if (!rc) {
4324 rc = mwl8k_cmd_set_edca_params(hw, queue, 4340 int q = MWL8K_TX_QUEUES - 1 - queue;
4341 rc = mwl8k_cmd_set_edca_params(hw, q,
4325 params->cw_min, 4342 params->cw_min,
4326 params->cw_max, 4343 params->cw_max,
4327 params->aifs, 4344 params->aifs,
4328 params->txop); 4345 params->txop);
4346 }
4329 4347
4330 mwl8k_fw_unlock(hw); 4348 mwl8k_fw_unlock(hw);
4331 } 4349 }
@@ -4760,7 +4778,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4760 hw->queues = MWL8K_TX_QUEUES; 4778 hw->queues = MWL8K_TX_QUEUES;
4761 4779
4762 /* Set rssi values to dBm */ 4780 /* Set rssi values to dBm */
4763 hw->flags |= IEEE80211_HW_SIGNAL_DBM; 4781 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
4764 hw->vif_data_size = sizeof(struct mwl8k_vif); 4782 hw->vif_data_size = sizeof(struct mwl8k_vif);
4765 hw->sta_data_size = sizeof(struct mwl8k_sta); 4783 hw->sta_data_size = sizeof(struct mwl8k_sta);
4766 4784
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 86cb54c842e7..e99ca1c1e0d8 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -111,6 +111,11 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
111 111
112 freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel)); 112 freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
113 channel = ieee80211_get_channel(wiphy, freq); 113 channel = ieee80211_get_channel(wiphy, freq);
114 if (!channel) {
115 printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
116 bss->a.channel, freq);
117 return; /* Then ignore it for now */
118 }
114 timestamp = 0; 119 timestamp = 0;
115 capability = le16_to_cpu(bss->a.capabilities); 120 capability = le16_to_cpu(bss->a.capabilities);
116 beacon_interval = le16_to_cpu(bss->a.beacon_interv); 121 beacon_interval = le16_to_cpu(bss->a.beacon_interv);
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index f54e15fcd623..13d750da9301 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -524,10 +524,13 @@ err_data:
524 524
525struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq) 525struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
526{ 526{
527 struct p54_rssi_db_entry *entry = (void *)(priv->rssi_db->data + 527 struct p54_rssi_db_entry *entry;
528 priv->rssi_db->offset);
529 int i, found = -1; 528 int i, found = -1;
530 529
530 if (!priv->rssi_db)
531 return &p54_rssi_default;
532
533 entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset);
531 for (i = 0; i < priv->rssi_db->entries; i++) { 534 for (i = 0; i < priv->rssi_db->entries; i++) {
532 if (!same_band(freq, entry[i].freq)) 535 if (!same_band(freq, entry[i].freq))
533 continue; 536 continue;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 0d3d108f6fe2..2fab7d20ffc2 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -559,6 +559,7 @@ int p54_set_edcf(struct p54_common *priv)
559{ 559{
560 struct sk_buff *skb; 560 struct sk_buff *skb;
561 struct p54_edcf *edcf; 561 struct p54_edcf *edcf;
562 u8 rtd;
562 563
563 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf), 564 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
564 P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC); 565 P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
@@ -575,9 +576,15 @@ int p54_set_edcf(struct p54_common *priv)
575 edcf->sifs = 0x0a; 576 edcf->sifs = 0x0a;
576 edcf->eofpad = 0x06; 577 edcf->eofpad = 0x06;
577 } 578 }
579 /*
580 * calculate the extra round trip delay according to the
581 * formula from 802.11-2007 17.3.8.6.
582 */
583 rtd = 3 * priv->coverage_class;
584 edcf->slottime += rtd;
585 edcf->round_trip_delay = cpu_to_le16(rtd);
578 /* (see prism54/isl_oid.h for further details) */ 586 /* (see prism54/isl_oid.h for further details) */
579 edcf->frameburst = cpu_to_le16(0); 587 edcf->frameburst = cpu_to_le16(0);
580 edcf->round_trip_delay = cpu_to_le16(0);
581 edcf->flags = 0; 588 edcf->flags = 0;
582 memset(edcf->mapping, 0, sizeof(edcf->mapping)); 589 memset(edcf->mapping, 0, sizeof(edcf->mapping));
583 memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue)); 590 memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 5ca117e6f95b..eb581abc1079 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,7 @@ int p54_init_leds(struct p54_common *priv);
526void p54_unregister_leds(struct p54_common *priv); 526void p54_unregister_leds(struct p54_common *priv);
527 527
528/* xmit functions */ 528/* xmit functions */
529int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); 529void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
530int p54_tx_cancel(struct p54_common *priv, __le32 req_id); 530int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
531void p54_tx(struct p54_common *priv, struct sk_buff *skb); 531void p54_tx(struct p54_common *priv, struct sk_buff *skb);
532 532
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index e14a05bbc485..356e6bb443a6 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -157,7 +157,7 @@ static int p54_beacon_update(struct p54_common *priv,
157 * to cancel the old beacon template by hand, instead the firmware 157 * to cancel the old beacon template by hand, instead the firmware
158 * will release the previous one through the feedback mechanism. 158 * will release the previous one through the feedback mechanism.
159 */ 159 */
160 WARN_ON(p54_tx_80211(priv->hw, beacon)); 160 p54_tx_80211(priv->hw, beacon);
161 priv->tsf_high32 = 0; 161 priv->tsf_high32 = 0;
162 priv->tsf_low32 = 0; 162 priv->tsf_low32 = 0;
163 163
@@ -566,6 +566,17 @@ static void p54_flush(struct ieee80211_hw *dev, bool drop)
566 WARN(total, "tx flush timeout, unresponsive firmware"); 566 WARN(total, "tx flush timeout, unresponsive firmware");
567} 567}
568 568
569static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class)
570{
571 struct p54_common *priv = dev->priv;
572
573 mutex_lock(&priv->conf_mutex);
574 /* support all coverage class values as in 802.11-2007 Table 7-27 */
575 priv->coverage_class = clamp_t(u8, coverage_class, 0, 31);
576 p54_set_edcf(priv);
577 mutex_unlock(&priv->conf_mutex);
578}
579
569static const struct ieee80211_ops p54_ops = { 580static const struct ieee80211_ops p54_ops = {
570 .tx = p54_tx_80211, 581 .tx = p54_tx_80211,
571 .start = p54_start, 582 .start = p54_start,
@@ -584,6 +595,7 @@ static const struct ieee80211_ops p54_ops = {
584 .conf_tx = p54_conf_tx, 595 .conf_tx = p54_conf_tx,
585 .get_stats = p54_get_stats, 596 .get_stats = p54_get_stats,
586 .get_survey = p54_get_survey, 597 .get_survey = p54_get_survey,
598 .set_coverage_class = p54_set_coverage_class,
587}; 599};
588 600
589struct ieee80211_hw *p54_init_common(size_t priv_data_len) 601struct ieee80211_hw *p54_init_common(size_t priv_data_len)
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index f951c8f31863..50730fc23fe5 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -217,6 +217,7 @@ struct p54_common {
217 u32 tsf_low32, tsf_high32; 217 u32 tsf_low32, tsf_high32;
218 u32 basic_rate_mask; 218 u32 basic_rate_mask;
219 u16 aid; 219 u16 aid;
220 u8 coverage_class;
220 bool powersave_override; 221 bool powersave_override;
221 __le32 beacon_req_id; 222 __le32 beacon_req_id;
222 struct completion beacon_comp; 223 struct completion beacon_comp;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5b..0494d7b102d4 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
199 while (i != idx) { 199 while (i != idx) {
200 u16 len; 200 u16 len;
201 struct sk_buff *skb; 201 struct sk_buff *skb;
202 dma_addr_t dma_addr;
202 desc = &ring[i]; 203 desc = &ring[i];
203 len = le16_to_cpu(desc->len); 204 len = le16_to_cpu(desc->len);
204 skb = rx_buf[i]; 205 skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
216 217
217 len = priv->common.rx_mtu; 218 len = priv->common.rx_mtu;
218 } 219 }
220 dma_addr = le32_to_cpu(desc->host_addr);
221 pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
222 priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
219 skb_put(skb, len); 223 skb_put(skb, len);
220 224
221 if (p54_rx(dev, skb)) { 225 if (p54_rx(dev, skb)) {
222 pci_unmap_single(priv->pdev, 226 pci_unmap_single(priv->pdev, dma_addr,
223 le32_to_cpu(desc->host_addr), 227 priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
224 priv->common.rx_mtu + 32,
225 PCI_DMA_FROMDEVICE);
226 rx_buf[i] = NULL; 228 rx_buf[i] = NULL;
227 desc->host_addr = 0; 229 desc->host_addr = cpu_to_le32(0);
228 } else { 230 } else {
229 skb_trim(skb, 0); 231 skb_trim(skb, 0);
232 pci_dma_sync_single_for_device(priv->pdev, dma_addr,
233 priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
230 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); 234 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
231 } 235 }
232 236
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7638c4..9b344a921e74 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ 98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
101 {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
101 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 102 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
102 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ 103 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
103 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ 104 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 917d5d948e3c..7834c26c2954 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -367,7 +367,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
367 rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; 367 rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
368 priv->tsf_low32 = tsf32; 368 priv->tsf_low32 = tsf32;
369 369
370 rx_status->flag |= RX_FLAG_TSFT; 370 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
371 371
372 if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) 372 if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
373 header_len += hdr->align[0]; 373 header_len += hdr->align[0];
@@ -696,7 +696,7 @@ static u8 p54_convert_algo(u32 cipher)
696 } 696 }
697} 697}
698 698
699int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) 699void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
700{ 700{
701 struct p54_common *priv = dev->priv; 701 struct p54_common *priv = dev->priv;
702 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 702 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -717,12 +717,8 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
717 &hdr_flags, &aid, &burst_allowed); 717 &hdr_flags, &aid, &burst_allowed);
718 718
719 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { 719 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
720 if (!IS_QOS_QUEUE(queue)) { 720 dev_kfree_skb_any(skb);
721 dev_kfree_skb_any(skb); 721 return;
722 return NETDEV_TX_OK;
723 } else {
724 return NETDEV_TX_BUSY;
725 }
726 } 722 }
727 723
728 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; 724 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
@@ -865,5 +861,4 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
865 p54info->extra_len = extra_len; 861 p54info->extra_len = extra_len;
866 862
867 p54_tx(priv, skb); 863 p54_tx(priv, skb);
868 return NETDEV_TX_OK;
869} 864}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2cce247..518542b4bf9e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2597 __le32 mode; 2597 __le32 mode;
2598 int ret; 2598 int ret;
2599 2599
2600 if (priv->device_type != RNDIS_BCM4320B)
2601 return -ENOTSUPP;
2602
2600 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, 2603 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
2601 enabled ? "enabled" : "disabled", 2604 enabled ? "enabled" : "disabled",
2602 timeout); 2605 timeout);
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 1df432c1f2c7..19453d23e90d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1185,7 +1185,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry);
1185/* 1185/*
1186 * mac80211 handlers. 1186 * mac80211 handlers.
1187 */ 1187 */
1188int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 1188void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
1189int rt2x00mac_start(struct ieee80211_hw *hw); 1189int rt2x00mac_start(struct ieee80211_hw *hw);
1190void rt2x00mac_stop(struct ieee80211_hw *hw); 1190void rt2x00mac_stop(struct ieee80211_hw *hw);
1191int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1191int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 1b3edef9e3d2..c2c35838c2f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
99 return retval; 99 return retval;
100} 100}
101 101
102int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 102void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
103{ 103{
104 struct rt2x00_dev *rt2x00dev = hw->priv; 104 struct rt2x00_dev *rt2x00dev = hw->priv;
105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -155,12 +155,11 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
155 if (rt2x00queue_threshold(queue)) 155 if (rt2x00queue_threshold(queue))
156 rt2x00queue_pause_queue(queue); 156 rt2x00queue_pause_queue(queue);
157 157
158 return NETDEV_TX_OK; 158 return;
159 159
160 exit_fail: 160 exit_fail:
161 ieee80211_stop_queue(rt2x00dev->hw, qid); 161 ieee80211_stop_queue(rt2x00dev->hw, qid);
162 dev_kfree_skb_any(skb); 162 dev_kfree_skb_any(skb);
163 return NETDEV_TX_OK;
164} 163}
165EXPORT_SYMBOL_GPL(rt2x00mac_tx); 164EXPORT_SYMBOL_GPL(rt2x00mac_tx);
166 165
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 5851cbc1e957..80db5cabc9b9 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -146,7 +146,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
146 rx_status.freq = dev->conf.channel->center_freq; 146 rx_status.freq = dev->conf.channel->center_freq;
147 rx_status.band = dev->conf.channel->band; 147 rx_status.band = dev->conf.channel->band;
148 rx_status.mactime = le64_to_cpu(entry->tsft); 148 rx_status.mactime = le64_to_cpu(entry->tsft);
149 rx_status.flag |= RX_FLAG_TSFT; 149 rx_status.flag |= RX_FLAG_MACTIME_MPDU;
150 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 150 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
151 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 151 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
152 152
@@ -240,7 +240,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
240 return IRQ_HANDLED; 240 return IRQ_HANDLED;
241} 241}
242 242
243static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 243static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
244{ 244{
245 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 245 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
246 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 246 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -321,8 +321,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
321 spin_unlock_irqrestore(&priv->lock, flags); 321 spin_unlock_irqrestore(&priv->lock, flags);
322 322
323 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 323 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
324
325 return 0;
326} 324}
327 325
328void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam) 326void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -687,7 +685,6 @@ static void rtl8180_beacon_work(struct work_struct *work)
687 struct ieee80211_hw *dev = vif_priv->dev; 685 struct ieee80211_hw *dev = vif_priv->dev;
688 struct ieee80211_mgmt *mgmt; 686 struct ieee80211_mgmt *mgmt;
689 struct sk_buff *skb; 687 struct sk_buff *skb;
690 int err = 0;
691 688
692 /* don't overflow the tx ring */ 689 /* don't overflow the tx ring */
693 if (ieee80211_queue_stopped(dev, 0)) 690 if (ieee80211_queue_stopped(dev, 0))
@@ -708,8 +705,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
708 /* TODO: use actual beacon queue */ 705 /* TODO: use actual beacon queue */
709 skb_set_queue_mapping(skb, 0); 706 skb_set_queue_mapping(skb, 0);
710 707
711 err = rtl8180_tx(dev, skb); 708 rtl8180_tx(dev, skb);
712 WARN_ON(err);
713 709
714resched: 710resched:
715 /* 711 /*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 6b82cac37ee3..c5a5e788f25f 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -227,7 +227,7 @@ static void rtl8187_tx_cb(struct urb *urb)
227 } 227 }
228} 228}
229 229
230static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 230static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
231{ 231{
232 struct rtl8187_priv *priv = dev->priv; 232 struct rtl8187_priv *priv = dev->priv;
233 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 233 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -241,7 +241,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
241 urb = usb_alloc_urb(0, GFP_ATOMIC); 241 urb = usb_alloc_urb(0, GFP_ATOMIC);
242 if (!urb) { 242 if (!urb) {
243 kfree_skb(skb); 243 kfree_skb(skb);
244 return NETDEV_TX_OK; 244 return;
245 } 245 }
246 246
247 flags = skb->len; 247 flags = skb->len;
@@ -309,8 +309,6 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
309 kfree_skb(skb); 309 kfree_skb(skb);
310 } 310 }
311 usb_free_urb(urb); 311 usb_free_urb(urb);
312
313 return NETDEV_TX_OK;
314} 312}
315 313
316static void rtl8187_rx_cb(struct urb *urb) 314static void rtl8187_rx_cb(struct urb *urb)
@@ -373,7 +371,7 @@ static void rtl8187_rx_cb(struct urb *urb)
373 rx_status.rate_idx = rate; 371 rx_status.rate_idx = rate;
374 rx_status.freq = dev->conf.channel->center_freq; 372 rx_status.freq = dev->conf.channel->center_freq;
375 rx_status.band = dev->conf.channel->band; 373 rx_status.band = dev->conf.channel->band;
376 rx_status.flag |= RX_FLAG_TSFT; 374 rx_status.flag |= RX_FLAG_MACTIME_MPDU;
377 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 375 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
378 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 376 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
379 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 377 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 86f8d4d64037..ce49e0ce7cad 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -3,6 +3,7 @@ config RTL8192CE
3 depends on MAC80211 && PCI && EXPERIMENTAL 3 depends on MAC80211 && PCI && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 select RTLWIFI 5 select RTLWIFI
6 select RTL8192C_COMMON
6 ---help--- 7 ---help---
7 This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe 8 This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
8 wireless network adapters. 9 wireless network adapters.
@@ -14,6 +15,7 @@ config RTL8192CU
14 depends on MAC80211 && USB && EXPERIMENTAL 15 depends on MAC80211 && USB && EXPERIMENTAL
15 select FW_LOADER 16 select FW_LOADER
16 select RTLWIFI 17 select RTLWIFI
18 select RTL8192C_COMMON
17 ---help--- 19 ---help---
18 This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB 20 This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
19 wireless network adapters. 21 wireless network adapters.
@@ -24,3 +26,8 @@ config RTLWIFI
24 tristate 26 tristate
25 depends on RTL8192CE || RTL8192CU 27 depends on RTL8192CE || RTL8192CU
26 default m 28 default m
29
30config RTL8192C_COMMON
31 tristate
32 depends on RTL8192CE || RTL8192CU
33 default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index c3e83a1da33b..9192fd583413 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -5,12 +5,18 @@ rtlwifi-objs := \
5 core.o \ 5 core.o \
6 debug.o \ 6 debug.o \
7 efuse.o \ 7 efuse.o \
8 pci.o \
9 ps.o \ 8 ps.o \
10 rc.o \ 9 rc.o \
11 regd.o \ 10 regd.o \
12 usb.o 11 usb.o
13 12
13rtl8192c_common-objs += \
14
15ifeq ($(CONFIG_PCI),y)
16rtlwifi-objs += pci.o
17endif
18
19obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
14obj-$(CONFIG_RTL8192CE) += rtl8192ce/ 20obj-$(CONFIG_RTL8192CE) += rtl8192ce/
15obj-$(CONFIG_RTL8192CU) += rtl8192cu/ 21obj-$(CONFIG_RTL8192CU) += rtl8192cu/
16 22
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 3f40dc2b129c..bb0c781f4a1b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -283,13 +283,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
283 rtlmac->hw = hw; 283 rtlmac->hw = hw;
284 284
285 /* <2> rate control register */ 285 /* <2> rate control register */
286 if (rtl_rate_control_register()) { 286 hw->rate_control_algorithm = "rtl_rc";
287 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
288 ("rtl: Unable to register rtl_rc,"
289 "use default RC !!\n"));
290 } else {
291 hw->rate_control_algorithm = "rtl_rc";
292 }
293 287
294 /* 288 /*
295 * <3> init CRDA must come after init 289 * <3> init CRDA must come after init
@@ -325,8 +319,6 @@ int rtl_init_core(struct ieee80211_hw *hw)
325 319
326void rtl_deinit_core(struct ieee80211_hw *hw) 320void rtl_deinit_core(struct ieee80211_hw *hw)
327{ 321{
328 /*RC*/
329 rtl_rate_control_unregister();
330} 322}
331 323
332void rtl_init_rx_config(struct ieee80211_hw *hw) 324void rtl_init_rx_config(struct ieee80211_hw *hw)
@@ -945,11 +937,16 @@ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
945 937
946static int __init rtl_core_module_init(void) 938static int __init rtl_core_module_init(void)
947{ 939{
940 if (rtl_rate_control_register())
941 printk(KERN_ERR "rtlwifi: Unable to register rtl_rc,"
942 "use default RC !!\n");
948 return 0; 943 return 0;
949} 944}
950 945
951static void __exit rtl_core_module_exit(void) 946static void __exit rtl_core_module_exit(void)
952{ 947{
948 /*RC*/
949 rtl_rate_control_unregister();
953} 950}
954 951
955module_init(rtl_core_module_init); 952module_init(rtl_core_module_init);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index b0996bf8a214..e4f4aee8f298 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -82,7 +82,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
82 mutex_unlock(&rtlpriv->locks.conf_mutex); 82 mutex_unlock(&rtlpriv->locks.conf_mutex);
83} 83}
84 84
85static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 85static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
86{ 86{
87 struct rtl_priv *rtlpriv = rtl_priv(hw); 87 struct rtl_priv *rtlpriv = rtl_priv(hw);
88 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 88 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -97,11 +97,10 @@ static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
97 97
98 rtlpriv->intf_ops->adapter_tx(hw, skb); 98 rtlpriv->intf_ops->adapter_tx(hw, skb);
99 99
100 return NETDEV_TX_OK; 100 return;
101 101
102err_free: 102err_free:
103 dev_kfree_skb_any(skb); 103 dev_kfree_skb_any(skb);
104 return NETDEV_TX_OK;
105} 104}
106 105
107static int rtl_op_add_interface(struct ieee80211_hw *hw, 106static int rtl_op_add_interface(struct ieee80211_hw *hw,
@@ -552,6 +551,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
552 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 551 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
553 ("BSS_CHANGED_HT\n")); 552 ("BSS_CHANGED_HT\n"));
554 553
554 rcu_read_lock();
555 sta = ieee80211_find_sta(mac->vif, mac->bssid); 555 sta = ieee80211_find_sta(mac->vif, mac->bssid);
556 556
557 if (sta) { 557 if (sta) {
@@ -564,6 +564,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
564 mac->current_ampdu_factor = 564 mac->current_ampdu_factor =
565 sta->ht_cap.ampdu_factor; 565 sta->ht_cap.ampdu_factor;
566 } 566 }
567 rcu_read_unlock();
567 568
568 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, 569 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
569 (u8 *) (&mac->max_mss_density)); 570 (u8 *) (&mac->max_mss_density));
@@ -615,6 +616,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
615 else 616 else
616 mac->mode = WIRELESS_MODE_G; 617 mac->mode = WIRELESS_MODE_G;
617 618
619 rcu_read_lock();
618 sta = ieee80211_find_sta(mac->vif, mac->bssid); 620 sta = ieee80211_find_sta(mac->vif, mac->bssid);
619 621
620 if (sta) { 622 if (sta) {
@@ -649,6 +651,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
649 */ 651 */
650 } 652 }
651 } 653 }
654 rcu_read_unlock();
652 655
653 /*mac80211 just give us CCK rates any time 656 /*mac80211 just give us CCK rates any time
654 *So we add G rate in basic rates when 657 *So we add G rate in basic rates when
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 62876cd5c41a..4f92cba6810a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1169,21 +1169,3 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
1169 return word_cnts; 1169 return word_cnts;
1170} 1170}
1171 1171
1172void efuse_reset_loader(struct ieee80211_hw *hw)
1173{
1174 struct rtl_priv *rtlpriv = rtl_priv(hw);
1175 u16 tmp_u2b;
1176
1177 tmp_u2b = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
1178 rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
1179 (tmp_u2b & ~(BIT(12))));
1180 udelay(10000);
1181 rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
1182 (tmp_u2b | BIT(12)));
1183 udelay(10000);
1184}
1185
1186bool efuse_program_map(struct ieee80211_hw *hw, char *p_filename, u8 tabletype)
1187{
1188 return true;
1189}
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 2d39a4df181b..47774dd4c2a6 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -117,8 +117,5 @@ extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
117extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); 117extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
118extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); 118extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
119extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); 119extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
120extern bool efuse_program_map(struct ieee80211_hw *hw,
121 char *p_filename, u8 tabletype);
122extern void efuse_reset_loader(struct ieee80211_hw *hw);
123 120
124#endif 121#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 1f18bf7df741..9cd7703c2a30 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1477,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1477 struct pci_dev *bridge_pdev = pdev->bus->self; 1477 struct pci_dev *bridge_pdev = pdev->bus->self;
1478 u16 venderid; 1478 u16 venderid;
1479 u16 deviceid; 1479 u16 deviceid;
1480 u8 revisionid;
1481 u16 irqline; 1480 u16 irqline;
1482 u8 tmp; 1481 u8 tmp;
1483 1482
1484 venderid = pdev->vendor; 1483 venderid = pdev->vendor;
1485 deviceid = pdev->device; 1484 deviceid = pdev->device;
1486 pci_read_config_byte(pdev, 0x8, &revisionid);
1487 pci_read_config_word(pdev, 0x3C, &irqline); 1485 pci_read_config_word(pdev, 0x3C, &irqline);
1488 1486
1489 if (deviceid == RTL_PCI_8192_DID || 1487 if (deviceid == RTL_PCI_8192_DID ||
@@ -1494,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1494 deviceid == RTL_PCI_8173_DID || 1492 deviceid == RTL_PCI_8173_DID ||
1495 deviceid == RTL_PCI_8172_DID || 1493 deviceid == RTL_PCI_8172_DID ||
1496 deviceid == RTL_PCI_8171_DID) { 1494 deviceid == RTL_PCI_8171_DID) {
1497 switch (revisionid) { 1495 switch (pdev->revision) {
1498 case RTL_PCI_REVISION_ID_8192PCIE: 1496 case RTL_PCI_REVISION_ID_8192PCIE:
1499 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1497 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1500 ("8192 PCI-E is found - " 1498 ("8192 PCI-E is found - "
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
new file mode 100644
index 000000000000..aee42d7ae8a2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
@@ -0,0 +1,9 @@
1rtl8192c-common-objs := \
2 main.o \
3 dm_common.o \
4 fw_common.o \
5 phy_common.o
6
7obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
8
9ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index b4f1e4e6b733..bb023274414c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -27,6 +27,8 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include "dm_common.h"
31
30struct dig_t dm_digtable; 32struct dig_t dm_digtable;
31static struct ps_t dm_pstable; 33static struct ps_t dm_pstable;
32 34
@@ -517,6 +519,7 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
517 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; 519 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
518 } 520 }
519} 521}
522EXPORT_SYMBOL(rtl92c_dm_write_dig);
520 523
521static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) 524static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
522{ 525{
@@ -554,6 +557,7 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
554 rtlpriv->dm.is_any_nonbepkts = false; 557 rtlpriv->dm.is_any_nonbepkts = false;
555 rtlpriv->dm.is_cur_rdlstate = false; 558 rtlpriv->dm.is_cur_rdlstate = false;
556} 559}
560EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
557 561
558static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) 562static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
559{ 563{
@@ -1103,6 +1107,7 @@ void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
1103{ 1107{
1104 rtl92c_dm_check_txpower_tracking_thermal_meter(hw); 1108 rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
1105} 1109}
1110EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
1106 1111
1107void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) 1112void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1108{ 1113{
@@ -1118,6 +1123,7 @@ void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1118 rtlpriv->dm.useramask = false; 1123 rtlpriv->dm.useramask = false;
1119 1124
1120} 1125}
1126EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
1121 1127
1122static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) 1128static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1123{ 1129{
@@ -1307,6 +1313,7 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1307 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate; 1313 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
1308 } 1314 }
1309} 1315}
1316EXPORT_SYMBOL(rtl92c_dm_rf_saving);
1310 1317
1311static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw) 1318static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1312{ 1319{
@@ -1360,6 +1367,7 @@ void rtl92c_dm_init(struct ieee80211_hw *hw)
1360 rtl92c_dm_initialize_txpower_tracking(hw); 1367 rtl92c_dm_initialize_txpower_tracking(hw);
1361 rtl92c_dm_init_dynamic_bb_powersaving(hw); 1368 rtl92c_dm_init_dynamic_bb_powersaving(hw);
1362} 1369}
1370EXPORT_SYMBOL(rtl92c_dm_init);
1363 1371
1364void rtl92c_dm_watchdog(struct ieee80211_hw *hw) 1372void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1365{ 1373{
@@ -1380,9 +1388,11 @@ void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1380 rtl92c_dm_dig(hw); 1388 rtl92c_dm_dig(hw);
1381 rtl92c_dm_false_alarm_counter_statistics(hw); 1389 rtl92c_dm_false_alarm_counter_statistics(hw);
1382 rtl92c_dm_dynamic_bb_powersaving(hw); 1390 rtl92c_dm_dynamic_bb_powersaving(hw);
1383 rtl92c_dm_dynamic_txpower(hw); 1391 rtlpriv->cfg->ops->dm_dynamic_txpower(hw);
1384 rtl92c_dm_check_txpower_tracking(hw); 1392 rtl92c_dm_check_txpower_tracking(hw);
1385 rtl92c_dm_refresh_rate_adaptive_mask(hw); 1393 rtl92c_dm_refresh_rate_adaptive_mask(hw);
1386 rtl92c_dm_check_edca_turbo(hw); 1394 rtl92c_dm_check_edca_turbo(hw);
1395
1387 } 1396 }
1388} 1397}
1398EXPORT_SYMBOL(rtl92c_dm_watchdog);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
new file mode 100644
index 000000000000..b9cbb0a3c03f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -0,0 +1,204 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92COMMON_DM_H__
31#define __RTL92COMMON_DM_H__
32
33#include "../wifi.h"
34#include "../rtl8192ce/def.h"
35#include "../rtl8192ce/reg.h"
36#include "fw_common.h"
37
38#define HAL_DM_DIG_DISABLE BIT(0)
39#define HAL_DM_HIPWR_DISABLE BIT(1)
40
41#define OFDM_TABLE_LENGTH 37
42#define CCK_TABLE_LENGTH 33
43
44#define OFDM_TABLE_SIZE 37
45#define CCK_TABLE_SIZE 33
46
47#define BW_AUTO_SWITCH_HIGH_LOW 25
48#define BW_AUTO_SWITCH_LOW_HIGH 30
49
50#define DM_DIG_THRESH_HIGH 40
51#define DM_DIG_THRESH_LOW 35
52
53#define DM_FALSEALARM_THRESH_LOW 400
54#define DM_FALSEALARM_THRESH_HIGH 1000
55
56#define DM_DIG_MAX 0x3e
57#define DM_DIG_MIN 0x1e
58
59#define DM_DIG_FA_UPPER 0x32
60#define DM_DIG_FA_LOWER 0x20
61#define DM_DIG_FA_TH0 0x20
62#define DM_DIG_FA_TH1 0x100
63#define DM_DIG_FA_TH2 0x200
64
65#define DM_DIG_BACKOFF_MAX 12
66#define DM_DIG_BACKOFF_MIN -4
67#define DM_DIG_BACKOFF_DEFAULT 10
68
69#define RXPATHSELECTION_SS_TH_lOW 30
70#define RXPATHSELECTION_DIFF_TH 18
71
72#define DM_RATR_STA_INIT 0
73#define DM_RATR_STA_HIGH 1
74#define DM_RATR_STA_MIDDLE 2
75#define DM_RATR_STA_LOW 3
76
77#define CTS2SELF_THVAL 30
78#define REGC38_TH 20
79
80#define WAIOTTHVal 25
81
82#define TXHIGHPWRLEVEL_NORMAL 0
83#define TXHIGHPWRLEVEL_LEVEL1 1
84#define TXHIGHPWRLEVEL_LEVEL2 2
85#define TXHIGHPWRLEVEL_BT1 3
86#define TXHIGHPWRLEVEL_BT2 4
87
88#define DM_TYPE_BYFW 0
89#define DM_TYPE_BYDRIVER 1
90
91#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
92#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
93
94struct ps_t {
95 u8 pre_ccastate;
96 u8 cur_ccasate;
97 u8 pre_rfstate;
98 u8 cur_rfstate;
99 long rssi_val_min;
100};
101
102struct dig_t {
103 u8 dig_enable_flag;
104 u8 dig_ext_port_stage;
105 u32 rssi_lowthresh;
106 u32 rssi_highthresh;
107 u32 fa_lowthresh;
108 u32 fa_highthresh;
109 u8 cursta_connectctate;
110 u8 presta_connectstate;
111 u8 curmultista_connectstate;
112 u8 pre_igvalue;
113 u8 cur_igvalue;
114 char backoff_val;
115 char backoff_val_range_max;
116 char backoff_val_range_min;
117 u8 rx_gain_range_max;
118 u8 rx_gain_range_min;
119 u8 rssi_val_min;
120 u8 pre_cck_pd_state;
121 u8 cur_cck_pd_state;
122 u8 pre_cck_fa_state;
123 u8 cur_cck_fa_state;
124 u8 pre_ccastate;
125 u8 cur_ccasate;
126};
127
128struct swat_t {
129 u8 failure_cnt;
130 u8 try_flag;
131 u8 stop_trying;
132 long pre_rssi;
133 long trying_threshold;
134 u8 cur_antenna;
135 u8 pre_antenna;
136};
137
138enum tag_dynamic_init_gain_operation_type_definition {
139 DIG_TYPE_THRESH_HIGH = 0,
140 DIG_TYPE_THRESH_LOW = 1,
141 DIG_TYPE_BACKOFF = 2,
142 DIG_TYPE_RX_GAIN_MIN = 3,
143 DIG_TYPE_RX_GAIN_MAX = 4,
144 DIG_TYPE_ENABLE = 5,
145 DIG_TYPE_DISABLE = 6,
146 DIG_OP_TYPE_MAX
147};
148
149enum tag_cck_packet_detection_threshold_type_definition {
150 CCK_PD_STAGE_LowRssi = 0,
151 CCK_PD_STAGE_HighRssi = 1,
152 CCK_FA_STAGE_Low = 2,
153 CCK_FA_STAGE_High = 3,
154 CCK_PD_STAGE_MAX = 4,
155};
156
157enum dm_1r_cca_e {
158 CCA_1R = 0,
159 CCA_2R = 1,
160 CCA_MAX = 2,
161};
162
163enum dm_rf_e {
164 RF_SAVE = 0,
165 RF_NORMAL = 1,
166 RF_MAX = 2,
167};
168
169enum dm_sw_ant_switch_e {
170 ANS_ANTENNA_B = 1,
171 ANS_ANTENNA_A = 2,
172 ANS_ANTENNA_MAX = 3,
173};
174
175enum dm_dig_ext_port_alg_e {
176 DIG_EXT_PORT_STAGE_0 = 0,
177 DIG_EXT_PORT_STAGE_1 = 1,
178 DIG_EXT_PORT_STAGE_2 = 2,
179 DIG_EXT_PORT_STAGE_3 = 3,
180 DIG_EXT_PORT_STAGE_MAX = 4,
181};
182
183enum dm_dig_connect_e {
184 DIG_STA_DISCONNECT = 0,
185 DIG_STA_CONNECT = 1,
186 DIG_STA_BEFORE_CONNECT = 2,
187 DIG_MULTISTA_DISCONNECT = 3,
188 DIG_MULTISTA_CONNECT = 4,
189 DIG_CONNECT_MAX
190};
191
192extern struct dig_t dm_digtable;
193void rtl92c_dm_init(struct ieee80211_hw *hw);
194void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
195void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
196void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
197void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
198void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
199void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
200void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
201void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
202void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
203
204#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 11c8bdb4af59..5ef91374b230 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -31,10 +31,9 @@
31#include "../wifi.h" 31#include "../wifi.h"
32#include "../pci.h" 32#include "../pci.h"
33#include "../base.h" 33#include "../base.h"
34#include "reg.h" 34#include "../rtl8192ce/reg.h"
35#include "def.h" 35#include "../rtl8192ce/def.h"
36#include "fw.h" 36#include "fw_common.h"
37#include "table.h"
38 37
39static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) 38static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
40{ 39{
@@ -279,6 +278,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
279 278
280 return 0; 279 return 0;
281} 280}
281EXPORT_SYMBOL(rtl92c_download_fw);
282 282
283static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) 283static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
284{ 284{
@@ -517,6 +517,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
517 517
518 return; 518 return;
519} 519}
520EXPORT_SYMBOL(rtl92c_fill_h2c_cmd);
520 521
521void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) 522void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
522{ 523{
@@ -537,6 +538,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
537 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); 538 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
538 } 539 }
539} 540}
541EXPORT_SYMBOL(rtl92c_firmware_selfreset);
540 542
541void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) 543void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
542{ 544{
@@ -557,6 +559,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
557 rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); 559 rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
558 560
559} 561}
562EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd);
560 563
561#define BEACON_PG 0 /*->1*/ 564#define BEACON_PG 0 /*->1*/
562#define PSPOLL_PG 2 565#define PSPOLL_PG 2
@@ -758,6 +761,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
758 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 761 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
759 ("Set RSVD page location to Fw FAIL!!!!!!.\n")); 762 ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
760} 763}
764EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
761 765
762void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) 766void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
763{ 767{
@@ -767,3 +771,4 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
767 771
768 rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); 772 rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
769} 773}
774EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3db33bd14666..3db33bd14666 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/fw.h b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
index a3bbac811d08..2f624fc27499 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
@@ -27,4 +27,13 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include "../rtl8192ce/fw.h" 30#include "../wifi.h"
31
32
33MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
34MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
35MODULE_AUTHOR("Georgia <georgia@realtek.com>");
36MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
37MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
38MODULE_LICENSE("GPL");
39MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 3728abc4df59..a70228278398 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -27,44 +27,15 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include "../wifi.h"
31#include "../rtl8192ce/reg.h"
32#include "../rtl8192ce/def.h"
33#include "dm_common.h"
34#include "phy_common.h"
35
30/* Define macro to shorten lines */ 36/* Define macro to shorten lines */
31#define MCS_TXPWR mcs_txpwrlevel_origoffset 37#define MCS_TXPWR mcs_txpwrlevel_origoffset
32 38
33static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
34 enum radio_path rfpath, u32 offset);
35static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
36 enum radio_path rfpath, u32 offset,
37 u32 data);
38static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
39 enum radio_path rfpath, u32 offset);
40static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
41 enum radio_path rfpath, u32 offset,
42 u32 data);
43static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
44static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
45static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
46static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
47 u8 configtype);
48static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
49 u8 configtype);
50static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
51static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
52 u32 cmdtableidx, u32 cmdtablesz,
53 enum swchnlcmd_id cmdid, u32 para1,
54 u32 para2, u32 msdelay);
55static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
56 u8 channel, u8 *stage, u8 *step,
57 u32 *delay);
58static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
59 enum wireless_mode wirelessmode,
60 long power_indbm);
61static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
62 enum radio_path rfpath);
63static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
64 enum wireless_mode wirelessmode,
65 u8 txpwridx);
66static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
67
68u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) 39u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
69{ 40{
70 struct rtl_priv *rtlpriv = rtl_priv(hw); 41 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -84,6 +55,7 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
84 return returnvalue; 55 return returnvalue;
85 56
86} 57}
58EXPORT_SYMBOL(rtl92c_phy_query_bb_reg);
87 59
88void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, 60void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
89 u32 regaddr, u32 bitmask, u32 data) 61 u32 regaddr, u32 bitmask, u32 data)
@@ -106,24 +78,26 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
106 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," 78 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
107 " data(%#x)\n", regaddr, bitmask, 79 " data(%#x)\n", regaddr, bitmask,
108 data)); 80 data));
109
110} 81}
82EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
111 83
112static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, 84u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
113 enum radio_path rfpath, u32 offset) 85 enum radio_path rfpath, u32 offset)
114{ 86{
115 RT_ASSERT(false, ("deprecated!\n")); 87 RT_ASSERT(false, ("deprecated!\n"));
116 return 0; 88 return 0;
117} 89}
90EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
118 91
119static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, 92void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
120 enum radio_path rfpath, u32 offset, 93 enum radio_path rfpath, u32 offset,
121 u32 data) 94 u32 data)
122{ 95{
123 RT_ASSERT(false, ("deprecated!\n")); 96 RT_ASSERT(false, ("deprecated!\n"));
124} 97}
98EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
125 99
126static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, 100u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
127 enum radio_path rfpath, u32 offset) 101 enum radio_path rfpath, u32 offset)
128{ 102{
129 struct rtl_priv *rtlpriv = rtl_priv(hw); 103 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -172,8 +146,9 @@ static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
172 retvalue)); 146 retvalue));
173 return retvalue; 147 return retvalue;
174} 148}
149EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
175 150
176static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, 151void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
177 enum radio_path rfpath, u32 offset, 152 enum radio_path rfpath, u32 offset,
178 u32 data) 153 u32 data)
179{ 154{
@@ -195,8 +170,9 @@ static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
195 rfpath, pphyreg->rf3wire_offset, 170 rfpath, pphyreg->rf3wire_offset,
196 data_and_addr)); 171 data_and_addr));
197} 172}
173EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
198 174
199static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask) 175u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
200{ 176{
201 u32 i; 177 u32 i;
202 178
@@ -206,6 +182,7 @@ static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
206 } 182 }
207 return i; 183 return i;
208} 184}
185EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
209 186
210static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw) 187static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
211{ 188{
@@ -222,10 +199,13 @@ static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
222} 199}
223bool rtl92c_phy_rf_config(struct ieee80211_hw *hw) 200bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
224{ 201{
225 return rtl92c_phy_rf6052_config(hw); 202 struct rtl_priv *rtlpriv = rtl_priv(hw);
203
204 return rtlpriv->cfg->ops->phy_rf6052_config(hw);
226} 205}
206EXPORT_SYMBOL(rtl92c_phy_rf_config);
227 207
228static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) 208bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
229{ 209{
230 struct rtl_priv *rtlpriv = rtl_priv(hw); 210 struct rtl_priv *rtlpriv = rtl_priv(hw);
231 struct rtl_phy *rtlphy = &(rtlpriv->phy); 211 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -233,7 +213,7 @@ static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
233 bool rtstatus; 213 bool rtstatus;
234 214
235 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n")); 215 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
236 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw, 216 rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
237 BASEBAND_CONFIG_PHY_REG); 217 BASEBAND_CONFIG_PHY_REG);
238 if (rtstatus != true) { 218 if (rtstatus != true) {
239 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!")); 219 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
@@ -245,14 +225,14 @@ static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
245 } 225 }
246 if (rtlefuse->autoload_failflag == false) { 226 if (rtlefuse->autoload_failflag == false) {
247 rtlphy->pwrgroup_cnt = 0; 227 rtlphy->pwrgroup_cnt = 0;
248 rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw, 228 rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw,
249 BASEBAND_CONFIG_PHY_REG); 229 BASEBAND_CONFIG_PHY_REG);
250 } 230 }
251 if (rtstatus != true) { 231 if (rtstatus != true) {
252 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!")); 232 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
253 return false; 233 return false;
254 } 234 }
255 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw, 235 rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
256 BASEBAND_CONFIG_AGC_TAB); 236 BASEBAND_CONFIG_AGC_TAB);
257 if (rtstatus != true) { 237 if (rtstatus != true) {
258 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n")); 238 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
@@ -263,13 +243,9 @@ static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
263 0x200)); 243 0x200));
264 return true; 244 return true;
265} 245}
246EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
266 247
267 248void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
268void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw)
269{
270}
271
272static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
273 u32 regaddr, u32 bitmask, 249 u32 regaddr, u32 bitmask,
274 u32 data) 250 u32 data)
275{ 251{
@@ -404,12 +380,7 @@ static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
404 rtlphy->pwrgroup_cnt++; 380 rtlphy->pwrgroup_cnt++;
405 } 381 }
406} 382}
407 383EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
408static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
409 enum radio_path rfpath)
410{
411 return true;
412}
413 384
414void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) 385void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
415{ 386{
@@ -443,7 +414,7 @@ void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
443 ROFDM0_RXDETECTOR3, rtlphy->framesync)); 414 ROFDM0_RXDETECTOR3, rtlphy->framesync));
444} 415}
445 416
446static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) 417void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
447{ 418{
448 struct rtl_priv *rtlpriv = rtl_priv(hw); 419 struct rtl_priv *rtlpriv = rtl_priv(hw);
449 struct rtl_phy *rtlphy = &(rtlpriv->phy); 420 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -547,6 +518,7 @@ static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
547 TRANSCEIVEB_HSPI_READBACK; 518 TRANSCEIVEB_HSPI_READBACK;
548 519
549} 520}
521EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
550 522
551void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) 523void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
552{ 524{
@@ -615,7 +587,8 @@ static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
615 587
616void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) 588void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
617{ 589{
618 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 590 struct rtl_priv *rtlpriv = rtl_priv(hw);
591 struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
619 u8 cckpowerlevel[2], ofdmpowerlevel[2]; 592 u8 cckpowerlevel[2], ofdmpowerlevel[2];
620 593
621 if (rtlefuse->txpwr_fromeprom == false) 594 if (rtlefuse->txpwr_fromeprom == false)
@@ -625,9 +598,11 @@ void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
625 _rtl92c_ccxpower_index_check(hw, 598 _rtl92c_ccxpower_index_check(hw,
626 channel, &cckpowerlevel[0], 599 channel, &cckpowerlevel[0],
627 &ofdmpowerlevel[0]); 600 &ofdmpowerlevel[0]);
628 rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); 601 rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
629 rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel); 602 rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0],
603 channel);
630} 604}
605EXPORT_SYMBOL(rtl92c_phy_set_txpower_level);
631 606
632bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) 607bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
633{ 608{
@@ -662,14 +637,16 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
662 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); 637 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
663 return true; 638 return true;
664} 639}
640EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm);
665 641
666void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval) 642void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
667{ 643{
668} 644}
645EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg);
669 646
670static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, 647u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
671 enum wireless_mode wirelessmode, 648 enum wireless_mode wirelessmode,
672 long power_indbm) 649 long power_indbm)
673{ 650{
674 u8 txpwridx; 651 u8 txpwridx;
675 long offset; 652 long offset;
@@ -697,10 +674,11 @@ static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
697 674
698 return txpwridx; 675 return txpwridx;
699} 676}
677EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx);
700 678
701static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, 679long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
702 enum wireless_mode wirelessmode, 680 enum wireless_mode wirelessmode,
703 u8 txpwridx) 681 u8 txpwridx)
704{ 682{
705 long offset; 683 long offset;
706 long pwrout_dbm; 684 long pwrout_dbm;
@@ -720,6 +698,7 @@ static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
720 pwrout_dbm = txpwridx / 2 + offset; 698 pwrout_dbm = txpwridx / 2 + offset;
721 return pwrout_dbm; 699 return pwrout_dbm;
722} 700}
701EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
723 702
724void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) 703void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
725{ 704{
@@ -749,6 +728,7 @@ void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
749 } 728 }
750 } 729 }
751} 730}
731EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
752 732
753void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, 733void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
754 enum nl80211_channel_type ch_type) 734 enum nl80211_channel_type ch_type)
@@ -762,7 +742,7 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
762 return; 742 return;
763 rtlphy->set_bwmode_inprogress = true; 743 rtlphy->set_bwmode_inprogress = true;
764 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) 744 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
765 rtl92c_phy_set_bw_mode_callback(hw); 745 rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
766 else { 746 else {
767 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 747 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
768 ("FALSE driver sleep or unload\n")); 748 ("FALSE driver sleep or unload\n"));
@@ -770,6 +750,7 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
770 rtlphy->current_chan_bw = tmp_bw; 750 rtlphy->current_chan_bw = tmp_bw;
771 } 751 }
772} 752}
753EXPORT_SYMBOL(rtl92c_phy_set_bw_mode);
773 754
774void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw) 755void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
775{ 756{
@@ -798,6 +779,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
798 } while (true); 779 } while (true);
799 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); 780 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
800} 781}
782EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
801 783
802u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) 784u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
803{ 785{
@@ -827,6 +809,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
827 } 809 }
828 return 1; 810 return 1;
829} 811}
812EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
830 813
831static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, 814static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
832 u8 channel, u8 *stage, u8 *step, 815 u8 channel, u8 *stage, u8 *step,
@@ -961,6 +944,7 @@ bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
961{ 944{
962 return true; 945 return true;
963} 946}
947EXPORT_SYMBOL(rtl8192_phy_check_is_legal_rfpath);
964 948
965static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) 949static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
966{ 950{
@@ -1901,19 +1885,22 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
1901 _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg, 1885 _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
1902 rtlphy->iqk_bb_backup, 10); 1886 rtlphy->iqk_bb_backup, 10);
1903} 1887}
1888EXPORT_SYMBOL(rtl92c_phy_iq_calibrate);
1904 1889
1905void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw) 1890void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
1906{ 1891{
1892 struct rtl_priv *rtlpriv = rtl_priv(hw);
1907 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1893 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1908 bool start_conttx = false, singletone = false; 1894 bool start_conttx = false, singletone = false;
1909 1895
1910 if (start_conttx || singletone) 1896 if (start_conttx || singletone)
1911 return; 1897 return;
1912 if (IS_92C_SERIAL(rtlhal->version)) 1898 if (IS_92C_SERIAL(rtlhal->version))
1913 _rtl92c_phy_lc_calibrate(hw, true); 1899 rtlpriv->cfg->ops->phy_lc_calibrate(hw, true);
1914 else 1900 else
1915 _rtl92c_phy_lc_calibrate(hw, false); 1901 rtlpriv->cfg->ops->phy_lc_calibrate(hw, false);
1916} 1902}
1903EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
1917 1904
1918void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) 1905void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
1919{ 1906{
@@ -1928,6 +1915,7 @@ void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
1928 else 1915 else
1929 _rtl92c_phy_ap_calibrate(hw, delta, false); 1916 _rtl92c_phy_ap_calibrate(hw, delta, false);
1930} 1917}
1918EXPORT_SYMBOL(rtl92c_phy_ap_calibrate);
1931 1919
1932void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) 1920void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
1933{ 1921{
@@ -1938,6 +1926,7 @@ void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
1938 else 1926 else
1939 _rtl92c_phy_set_rfpath_switch(hw, bmain, false); 1927 _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
1940} 1928}
1929EXPORT_SYMBOL(rtl92c_phy_set_rfpath_switch);
1941 1930
1942bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) 1931bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
1943{ 1932{
@@ -1976,6 +1965,7 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
1976 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype)); 1965 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
1977 return true; 1966 return true;
1978} 1967}
1968EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
1979 1969
1980void rtl92c_phy_set_io(struct ieee80211_hw *hw) 1970void rtl92c_phy_set_io(struct ieee80211_hw *hw)
1981{ 1971{
@@ -2005,6 +1995,7 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
2005 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, 1995 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2006 ("<---(%#x)\n", rtlphy->current_io_type)); 1996 ("<---(%#x)\n", rtlphy->current_io_type));
2007} 1997}
1998EXPORT_SYMBOL(rtl92c_phy_set_io);
2008 1999
2009void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw) 2000void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
2010{ 2001{
@@ -2017,8 +2008,9 @@ void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
2017 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); 2008 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2018 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); 2009 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2019} 2010}
2011EXPORT_SYMBOL(rtl92ce_phy_set_rf_on);
2020 2012
2021static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw) 2013void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
2022{ 2014{
2023 u32 u4b_tmp; 2015 u32 u4b_tmp;
2024 u8 delay = 5; 2016 u8 delay = 5;
@@ -2047,3 +2039,4 @@ static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
2047 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); 2039 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2048 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); 2040 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
2049} 2041}
2042EXPORT_SYMBOL(_rtl92c_phy_set_rf_sleep);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
new file mode 100644
index 000000000000..53ffb0981586
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -0,0 +1,246 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_PHY_H__
31#define __RTL92C_PHY_H__
32
33#define MAX_PRECMD_CNT 16
34#define MAX_RFDEPENDCMD_CNT 16
35#define MAX_POSTCMD_CNT 16
36
37#define MAX_DOZE_WAITING_TIMES_9x 64
38
39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41
42#define MAX_TOLERANCE 5
43#define IQK_DELAY_TIME 1
44
45#define APK_BB_REG_NUM 5
46#define APK_AFE_REG_NUM 16
47#define APK_CURVE_REG_NUM 4
48#define PATH_NUM 2
49
50#define LOOP_LIMIT 5
51#define MAX_STALL_TIME 50
52#define AntennaDiversityValue 0x80
53#define MAX_TXPWR_IDX_NMODE_92S 63
54#define Reset_Cnt_Limit 3
55
56#define IQK_ADDA_REG_NUM 16
57#define IQK_MAC_REG_NUM 4
58
59#define RF90_PATH_MAX 2
60
61#define CT_OFFSET_MAC_ADDR 0X16
62
63#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
64#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
65#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF 0x66
66#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
67#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
68
69#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
70#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
71
72#define CT_OFFSET_CHANNEL_PLAH 0x75
73#define CT_OFFSET_THERMAL_METER 0x78
74#define CT_OFFSET_RF_OPTION 0x79
75#define CT_OFFSET_VERSION 0x7E
76#define CT_OFFSET_CUSTOMER_ID 0x7F
77
78#define RTL92C_MAX_PATH_NUM 2
79#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
80enum swchnlcmd_id {
81 CMDID_END,
82 CMDID_SET_TXPOWEROWER_LEVEL,
83 CMDID_BBREGWRITE10,
84 CMDID_WRITEPORT_ULONG,
85 CMDID_WRITEPORT_USHORT,
86 CMDID_WRITEPORT_UCHAR,
87 CMDID_RF_WRITEREG,
88};
89
90struct swchnlcmd {
91 enum swchnlcmd_id cmdid;
92 u32 para1;
93 u32 para2;
94 u32 msdelay;
95};
96
97enum hw90_block_e {
98 HW90_BLOCK_MAC = 0,
99 HW90_BLOCK_PHY0 = 1,
100 HW90_BLOCK_PHY1 = 2,
101 HW90_BLOCK_RF = 3,
102 HW90_BLOCK_MAXIMUM = 4,
103};
104
105enum baseband_config_type {
106 BASEBAND_CONFIG_PHY_REG = 0,
107 BASEBAND_CONFIG_AGC_TAB = 1,
108};
109
110enum ra_offset_area {
111 RA_OFFSET_LEGACY_OFDM1,
112 RA_OFFSET_LEGACY_OFDM2,
113 RA_OFFSET_HT_OFDM1,
114 RA_OFFSET_HT_OFDM2,
115 RA_OFFSET_HT_OFDM3,
116 RA_OFFSET_HT_OFDM4,
117 RA_OFFSET_HT_CCK,
118};
119
120enum antenna_path {
121 ANTENNA_NONE,
122 ANTENNA_D,
123 ANTENNA_C,
124 ANTENNA_CD,
125 ANTENNA_B,
126 ANTENNA_BD,
127 ANTENNA_BC,
128 ANTENNA_BCD,
129 ANTENNA_A,
130 ANTENNA_AD,
131 ANTENNA_AC,
132 ANTENNA_ACD,
133 ANTENNA_AB,
134 ANTENNA_ABD,
135 ANTENNA_ABC,
136 ANTENNA_ABCD
137};
138
139struct r_antenna_select_ofdm {
140 u32 r_tx_antenna:4;
141 u32 r_ant_l:4;
142 u32 r_ant_non_ht:4;
143 u32 r_ant_ht1:4;
144 u32 r_ant_ht2:4;
145 u32 r_ant_ht_s1:4;
146 u32 r_ant_non_ht_s1:4;
147 u32 ofdm_txsc:2;
148 u32 reserved:2;
149};
150
151struct r_antenna_select_cck {
152 u8 r_cckrx_enable_2:2;
153 u8 r_cckrx_enable:2;
154 u8 r_ccktx_enable:4;
155};
156
157struct efuse_contents {
158 u8 mac_addr[ETH_ALEN];
159 u8 cck_tx_power_idx[6];
160 u8 ht40_1s_tx_power_idx[6];
161 u8 ht40_2s_tx_power_idx_diff[3];
162 u8 ht20_tx_power_idx_diff[3];
163 u8 ofdm_tx_power_idx_diff[3];
164 u8 ht40_max_power_offset[3];
165 u8 ht20_max_power_offset[3];
166 u8 channel_plan;
167 u8 thermal_meter;
168 u8 rf_option[5];
169 u8 version;
170 u8 oem_id;
171 u8 regulatory;
172};
173
174struct tx_power_struct {
175 u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
176 u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
177 u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
178 u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
179 u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
180 u8 legacy_ht_txpowerdiff;
181 u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
182 u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
183 u8 pwrgroup_cnt;
184 u32 mcs_original_offset[4][16];
185};
186
187extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
188 u32 regaddr, u32 bitmask);
189extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
190 u32 regaddr, u32 bitmask, u32 data);
191extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
192 enum radio_path rfpath, u32 regaddr,
193 u32 bitmask);
194extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
195 enum radio_path rfpath, u32 regaddr,
196 u32 bitmask, u32 data);
197extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
198extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
199extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
200extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
201 enum radio_path rfpath);
202extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
203extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
204 long *powerlevel);
205extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
206extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
207 long power_indbm);
208extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
209 u8 operation);
210extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
211extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
212 enum nl80211_channel_type ch_type);
213extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
214extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
215extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
216extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
217 u16 beaconinterval);
218void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
219void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
220void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
221bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
222 enum radio_path rfpath);
223extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
224 u32 rfpath);
225extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
226 enum rf_pwrstate rfpwr_state);
227void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
228void rtl92c_phy_set_io(struct ieee80211_hw *hw);
229void rtl92c_bb_block_on(struct ieee80211_hw *hw);
230u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
231long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
232 enum wireless_mode wirelessmode,
233 u8 txpwridx);
234u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
235 enum wireless_mode wirelessmode,
236 long power_indbm);
237void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
238static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
239 u32 cmdtableidx, u32 cmdtablesz,
240 enum swchnlcmd_id cmdid, u32 para1,
241 u32 para2, u32 msdelay);
242static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
243 u8 channel, u8 *stage, u8 *step,
244 u32 *delay);
245
246#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
index 5c5fdde637cb..c0cb0cfe7d37 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
@@ -1,6 +1,5 @@
1rtl8192ce-objs := \ 1rtl8192ce-objs := \
2 dm.o \ 2 dm.o \
3 fw.o \
4 hw.o \ 3 hw.o \
5 led.o \ 4 led.o \
6 phy.o \ 5 phy.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 888df5e2d2fc..7d76504df4d1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -33,11 +33,8 @@
33#include "def.h" 33#include "def.h"
34#include "phy.h" 34#include "phy.h"
35#include "dm.h" 35#include "dm.h"
36#include "fw.h"
37 36
38#include "../rtl8192c/dm_common.c" 37void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
39
40void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
41{ 38{
42 struct rtl_priv *rtlpriv = rtl_priv(hw); 39 struct rtl_priv *rtlpriv = rtl_priv(hw);
43 struct rtl_phy *rtlphy = &(rtlpriv->phy); 40 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -114,5 +111,3 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
114 111
115 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; 112 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
116} 113}
117
118
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 5911d52a24ac..36302ebae4a3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -192,6 +192,6 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw); 192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); 193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); 194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
195void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw); 195void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw);
196 196
197#endif 197#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 0b910921e606..05477f465a75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -37,7 +37,6 @@
37#include "def.h" 37#include "def.h"
38#include "phy.h" 38#include "phy.h"
39#include "dm.h" 39#include "dm.h"
40#include "fw.h"
41#include "led.h" 40#include "led.h"
42#include "hw.h" 41#include "hw.h"
43 42
@@ -949,8 +948,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
949 } 948 }
950 949
951 rtlhal->last_hmeboxnum = 0; 950 rtlhal->last_hmeboxnum = 0;
952 rtl92c_phy_mac_config(hw); 951 rtl92ce_phy_mac_config(hw);
953 rtl92c_phy_bb_config(hw); 952 rtl92ce_phy_bb_config(hw);
954 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; 953 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
955 rtl92c_phy_rf_config(hw); 954 rtl92c_phy_rf_config(hw);
956 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, 955 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 305c819c8c78..a3dfdb635168 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -30,6 +30,8 @@
30#ifndef __RTL92CE_HW_H__ 30#ifndef __RTL92CE_HW_H__
31#define __RTL92CE_HW_H__ 31#define __RTL92CE_HW_H__
32 32
33#define H2C_RA_MASK 6
34
33void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 35void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
34void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw); 36void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
35void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw, 37void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -53,5 +55,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
53void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, 55void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
54 u8 *p_macaddr, bool is_group, u8 enc_algo, 56 u8 *p_macaddr, bool is_group, u8 enc_algo,
55 bool is_wepkey, bool clear_all); 57 bool is_wepkey, bool clear_all);
58bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
59void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
60void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
61void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
62int rtl92c_download_fw(struct ieee80211_hw *hw);
63void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
64void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
65 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
66bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw);
56 67
57#endif 68#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 191106033b3c..d0541e8c6012 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -32,14 +32,13 @@
32#include "../ps.h" 32#include "../ps.h"
33#include "reg.h" 33#include "reg.h"
34#include "def.h" 34#include "def.h"
35#include "hw.h"
35#include "phy.h" 36#include "phy.h"
36#include "rf.h" 37#include "rf.h"
37#include "dm.h" 38#include "dm.h"
38#include "table.h" 39#include "table.h"
39 40
40#include "../rtl8192c/phy_common.c" 41u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
41
42u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
43 enum radio_path rfpath, u32 regaddr, u32 bitmask) 42 enum radio_path rfpath, u32 regaddr, u32 bitmask)
44{ 43{
45 struct rtl_priv *rtlpriv = rtl_priv(hw); 44 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -74,7 +73,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
74 return readback_value; 73 return readback_value;
75} 74}
76 75
77void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, 76void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
78 enum radio_path rfpath, 77 enum radio_path rfpath,
79 u32 regaddr, u32 bitmask, u32 data) 78 u32 regaddr, u32 bitmask, u32 data)
80{ 79{
@@ -122,19 +121,19 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
122 bitmask, data, rfpath)); 121 bitmask, data, rfpath));
123} 122}
124 123
125bool rtl92c_phy_mac_config(struct ieee80211_hw *hw) 124bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw)
126{ 125{
127 struct rtl_priv *rtlpriv = rtl_priv(hw); 126 struct rtl_priv *rtlpriv = rtl_priv(hw);
128 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 127 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
129 bool is92c = IS_92C_SERIAL(rtlhal->version); 128 bool is92c = IS_92C_SERIAL(rtlhal->version);
130 bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw); 129 bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw);
131 130
132 if (is92c) 131 if (is92c)
133 rtl_write_byte(rtlpriv, 0x14, 0x71); 132 rtl_write_byte(rtlpriv, 0x14, 0x71);
134 return rtstatus; 133 return rtstatus;
135} 134}
136 135
137bool rtl92c_phy_bb_config(struct ieee80211_hw *hw) 136bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw)
138{ 137{
139 bool rtstatus = true; 138 bool rtstatus = true;
140 struct rtl_priv *rtlpriv = rtl_priv(hw); 139 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -160,7 +159,7 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
160 return rtstatus; 159 return rtstatus;
161} 160}
162 161
163static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) 162bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
164{ 163{
165 struct rtl_priv *rtlpriv = rtl_priv(hw); 164 struct rtl_priv *rtlpriv = rtl_priv(hw);
166 u32 i; 165 u32 i;
@@ -177,7 +176,7 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
177 return true; 176 return true;
178} 177}
179 178
180static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, 179bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
181 u8 configtype) 180 u8 configtype)
182{ 181{
183 int i; 182 int i;
@@ -221,7 +220,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
221 phy_regarray_table[i], 220 phy_regarray_table[i],
222 phy_regarray_table[i + 1])); 221 phy_regarray_table[i + 1]));
223 } 222 }
224 rtl92c_phy_config_bb_external_pa(hw);
225 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { 223 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
226 for (i = 0; i < agctab_arraylen; i = i + 2) { 224 for (i = 0; i < agctab_arraylen; i = i + 2) {
227 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD, 225 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
@@ -237,7 +235,7 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
237 return true; 235 return true;
238} 236}
239 237
240static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, 238bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
241 u8 configtype) 239 u8 configtype)
242{ 240{
243 struct rtl_priv *rtlpriv = rtl_priv(hw); 241 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -276,7 +274,7 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
276 return true; 274 return true;
277} 275}
278 276
279bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 277bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
280 enum radio_path rfpath) 278 enum radio_path rfpath)
281{ 279{
282 280
@@ -331,7 +329,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
331 udelay(1); 329 udelay(1);
332 } 330 }
333 } 331 }
334 _rtl92c_phy_config_rf_external_pa(hw, rfpath);
335 break; 332 break;
336 case RF90_PATH_B: 333 case RF90_PATH_B:
337 for (i = 0; i < radiob_arraylen; i = i + 2) { 334 for (i = 0; i < radiob_arraylen; i = i + 2) {
@@ -367,7 +364,7 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
367 return true; 364 return true;
368} 365}
369 366
370void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw) 367void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
371{ 368{
372 struct rtl_priv *rtlpriv = rtl_priv(hw); 369 struct rtl_priv *rtlpriv = rtl_priv(hw);
373 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 370 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -435,7 +432,7 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
435 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); 432 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
436} 433}
437 434
438static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) 435void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
439{ 436{
440 u8 tmpreg; 437 u8 tmpreg;
441 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; 438 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
@@ -602,7 +599,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
602 jiffies_to_msecs(jiffies - 599 jiffies_to_msecs(jiffies -
603 ppsc->last_awake_jiffies))); 600 ppsc->last_awake_jiffies)));
604 ppsc->last_sleep_jiffies = jiffies; 601 ppsc->last_sleep_jiffies = jiffies;
605 _rtl92ce_phy_set_rf_sleep(hw); 602 _rtl92c_phy_set_rf_sleep(hw);
606 break; 603 break;
607 } 604 }
608 default: 605 default:
@@ -617,7 +614,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
617 return bresult; 614 return bresult;
618} 615}
619 616
620bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, 617bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
621 enum rf_pwrstate rfpwr_state) 618 enum rf_pwrstate rfpwr_state)
622{ 619{
623 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 620 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index 3fc60e434cef..a37267e3fc22 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -191,11 +191,11 @@ extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
191extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, 191extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
192 enum radio_path rfpath, u32 regaddr, 192 enum radio_path rfpath, u32 regaddr,
193 u32 bitmask); 193 u32 bitmask);
194extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, 194extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
195 enum radio_path rfpath, u32 regaddr, 195 enum radio_path rfpath, u32 regaddr,
196 u32 bitmask, u32 data); 196 u32 bitmask, u32 data);
197extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw); 197extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
198extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw); 198bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
199extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw); 199extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
200extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, 200extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
201 enum radio_path rfpath); 201 enum radio_path rfpath);
@@ -223,12 +223,32 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
223extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, 223extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
224 u32 rfpath); 224 u32 rfpath);
225bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 225bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
226extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, 226bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
227 enum rf_pwrstate rfpwr_state); 227 enum rf_pwrstate rfpwr_state);
228void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw);
229void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw); 228void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
230bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 229bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
231void rtl92c_phy_set_io(struct ieee80211_hw *hw); 230void rtl92c_phy_set_io(struct ieee80211_hw *hw);
232void rtl92c_bb_block_on(struct ieee80211_hw *hw); 231void rtl92c_bb_block_on(struct ieee80211_hw *hw);
232u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
233 enum radio_path rfpath, u32 offset);
234u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
235 enum radio_path rfpath, u32 offset);
236u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
237void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
238 enum radio_path rfpath, u32 offset,
239 u32 data);
240void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
241 u32 regaddr, u32 bitmask,
242 u32 data);
243void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
244 enum radio_path rfpath, u32 offset,
245 u32 data);
246void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
247 u32 regaddr, u32 bitmask,
248 u32 data);
249bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
250void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
251bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
252void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
233 253
234#endif 254#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index ffd8e04c4028..669b1168dbec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -61,7 +61,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
61 } 61 }
62} 62}
63 63
64void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 64void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel) 65 u8 *ppowerlevel)
66{ 66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw); 67 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -410,7 +410,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
410 } 410 }
411} 411}
412 412
413void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 413void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
414 u8 *ppowerlevel, u8 channel) 414 u8 *ppowerlevel, u8 channel)
415{ 415{
416 u32 writeVal[2], powerBase0[2], powerBase1[2]; 416 u32 writeVal[2], powerBase0[2], powerBase1[2];
@@ -430,7 +430,7 @@ void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
430 } 430 }
431} 431}
432 432
433bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw) 433bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
434{ 434{
435 struct rtl_priv *rtlpriv = rtl_priv(hw); 435 struct rtl_priv *rtlpriv = rtl_priv(hw);
436 struct rtl_phy *rtlphy = &(rtlpriv->phy); 436 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -484,11 +484,11 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
484 484
485 switch (rfpath) { 485 switch (rfpath) {
486 case RF90_PATH_A: 486 case RF90_PATH_A:
487 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, 487 rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
488 (enum radio_path) rfpath); 488 (enum radio_path) rfpath);
489 break; 489 break;
490 case RF90_PATH_B: 490 case RF90_PATH_B:
491 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, 491 rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
492 (enum radio_path) rfpath); 492 (enum radio_path) rfpath);
493 break; 493 break;
494 case RF90_PATH_C: 494 case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index d3014f99bb7b..3aa520c1c171 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -40,5 +40,8 @@ extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
40 u8 *ppowerlevel); 40 u8 *ppowerlevel);
41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
42 u8 *ppowerlevel, u8 channel); 42 u8 *ppowerlevel, u8 channel);
43extern bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw); 43bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
44bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
45 enum radio_path rfpath);
46
44#endif 47#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b4df0b332832..b1cc4d44f534 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -37,6 +37,7 @@
37#include "phy.h" 37#include "phy.h"
38#include "dm.h" 38#include "dm.h"
39#include "hw.h" 39#include "hw.h"
40#include "rf.h"
40#include "sw.h" 41#include "sw.h"
41#include "trx.h" 42#include "trx.h"
42#include "led.h" 43#include "led.h"
@@ -122,7 +123,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
122 .switch_channel = rtl92c_phy_sw_chnl, 123 .switch_channel = rtl92c_phy_sw_chnl,
123 .dm_watchdog = rtl92c_dm_watchdog, 124 .dm_watchdog = rtl92c_dm_watchdog,
124 .scan_operation_backup = rtl92c_phy_scan_operation_backup, 125 .scan_operation_backup = rtl92c_phy_scan_operation_backup,
125 .set_rf_power_state = rtl92c_phy_set_rf_power_state, 126 .set_rf_power_state = rtl92ce_phy_set_rf_power_state,
126 .led_control = rtl92ce_led_control, 127 .led_control = rtl92ce_led_control,
127 .set_desc = rtl92ce_set_desc, 128 .set_desc = rtl92ce_set_desc,
128 .get_desc = rtl92ce_get_desc, 129 .get_desc = rtl92ce_get_desc,
@@ -133,9 +134,17 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
133 .deinit_sw_leds = rtl92ce_deinit_sw_leds, 134 .deinit_sw_leds = rtl92ce_deinit_sw_leds,
134 .get_bbreg = rtl92c_phy_query_bb_reg, 135 .get_bbreg = rtl92c_phy_query_bb_reg,
135 .set_bbreg = rtl92c_phy_set_bb_reg, 136 .set_bbreg = rtl92c_phy_set_bb_reg,
136 .get_rfreg = rtl92c_phy_query_rf_reg, 137 .get_rfreg = rtl92ce_phy_query_rf_reg,
137 .set_rfreg = rtl92c_phy_set_rf_reg, 138 .set_rfreg = rtl92ce_phy_set_rf_reg,
138 .cmd_send_packet = _rtl92c_cmd_send_packet, 139 .cmd_send_packet = _rtl92c_cmd_send_packet,
140 .phy_rf6052_config = rtl92ce_phy_rf6052_config,
141 .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower,
142 .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower,
143 .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
144 .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
145 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
146 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
147 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
139}; 148};
140 149
141static struct rtl_mod_params rtl92ce_mod_params = { 150static struct rtl_mod_params rtl92ce_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
index 0568d6dc83d7..36e657668c1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -35,5 +35,17 @@ void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
35void rtl92c_init_var_map(struct ieee80211_hw *hw); 35void rtl92c_init_var_map(struct ieee80211_hw *hw);
36bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, 36bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
37 struct sk_buff *skb); 37 struct sk_buff *skb);
38void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
39 u8 *ppowerlevel);
40void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
41 u8 *ppowerlevel, u8 channel);
42bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
43 u8 configtype);
44bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
45 u8 configtype);
46void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
47u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
48 enum radio_path rfpath, u32 regaddr, u32 bitmask);
49void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
38 50
39#endif 51#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 01b95427fee0..aa2b5815600f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -252,9 +252,9 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
252 struct rtl_priv *rtlpriv = rtl_priv(hw); 252 struct rtl_priv *rtlpriv = rtl_priv(hw);
253 struct phy_sts_cck_8192s_t *cck_buf; 253 struct phy_sts_cck_8192s_t *cck_buf;
254 s8 rx_pwr_all, rx_pwr[4]; 254 s8 rx_pwr_all, rx_pwr[4];
255 u8 rf_rx_num, evm, pwdb_all; 255 u8 evm, pwdb_all, rf_rx_num = 0;
256 u8 i, max_spatial_stream; 256 u8 i, max_spatial_stream;
257 u32 rssi, total_rssi; 257 u32 rssi, total_rssi = 0;
258 bool is_cck_rate; 258 bool is_cck_rate;
259 259
260 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); 260 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
@@ -463,7 +463,7 @@ static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
463 struct rtl_stats *pstats) 463 struct rtl_stats *pstats)
464{ 464{
465 struct rtl_priv *rtlpriv = rtl_priv(hw); 465 struct rtl_priv *rtlpriv = rtl_priv(hw);
466 int weighting; 466 int weighting = 0;
467 467
468 if (rtlpriv->stats.recv_signal_power == 0) 468 if (rtlpriv->stats.recv_signal_power == 0)
469 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower; 469 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
@@ -691,7 +691,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
691 if (GET_RX_DESC_RXHT(pdesc)) 691 if (GET_RX_DESC_RXHT(pdesc))
692 rx_status->flag |= RX_FLAG_HT; 692 rx_status->flag |= RX_FLAG_HT;
693 693
694 rx_status->flag |= RX_FLAG_TSFT; 694 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
695 695
696 if (stats->decrypted) 696 if (stats->decrypted)
697 rx_status->flag |= RX_FLAG_DECRYPTED; 697 rx_status->flag |= RX_FLAG_DECRYPTED;
@@ -730,7 +730,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
730 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 730 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
731 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 731 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
732 bool defaultadapter = true; 732 bool defaultadapter = true;
733 struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid); 733 struct ieee80211_sta *sta;
734 u8 *pdesc = (u8 *) pdesc_tx; 734 u8 *pdesc = (u8 *) pdesc_tx;
735 struct rtl_tcb_desc tcb_desc; 735 struct rtl_tcb_desc tcb_desc;
736 u8 *qc = ieee80211_get_qos_ctl(hdr); 736 u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -810,10 +810,13 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
810 SET_TX_DESC_LINIP(pdesc, 0); 810 SET_TX_DESC_LINIP(pdesc, 0);
811 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len); 811 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
812 812
813 rcu_read_lock();
814 sta = ieee80211_find_sta(mac->vif, mac->bssid);
813 if (sta) { 815 if (sta) {
814 u8 ampdu_density = sta->ht_cap.ampdu_density; 816 u8 ampdu_density = sta->ht_cap.ampdu_density;
815 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); 817 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
816 } 818 }
819 rcu_read_unlock();
817 820
818 if (info->control.hw_key) { 821 if (info->control.hw_key) {
819 struct ieee80211_key_conf *keyconf = 822 struct ieee80211_key_conf *keyconf =
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
index 91c65122ca80..ad2de6b839ef 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
@@ -1,6 +1,5 @@
1rtl8192cu-objs := \ 1rtl8192cu-objs := \
2 dm.o \ 2 dm.o \
3 fw.o \
4 hw.o \ 3 hw.o \
5 led.o \ 4 led.o \
6 mac.o \ 5 mac.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index a4649a2f7e6f..f311baee668d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -33,11 +33,8 @@
33#include "def.h" 33#include "def.h"
34#include "phy.h" 34#include "phy.h"
35#include "dm.h" 35#include "dm.h"
36#include "fw.h"
37 36
38#include "../rtl8192c/dm_common.c" 37void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
39
40void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
41{ 38{
42 struct rtl_priv *rtlpriv = rtl_priv(hw); 39 struct rtl_priv *rtlpriv = rtl_priv(hw);
43 struct rtl_phy *rtlphy = &(rtlpriv->phy); 40 struct rtl_phy *rtlphy = &(rtlpriv->phy);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
index 5e7fbfc2851b..7f966c666b5a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -29,4 +29,4 @@
29 29
30#include "../rtl8192ce/dm.h" 30#include "../rtl8192ce/dm.h"
31 31
32void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw); 32void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index df8fe3b51c9b..9444e76838cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -38,7 +38,6 @@
38#include "phy.h" 38#include "phy.h"
39#include "mac.h" 39#include "mac.h"
40#include "dm.h" 40#include "dm.h"
41#include "fw.h"
42#include "hw.h" 41#include "hw.h"
43#include "trx.h" 42#include "trx.h"
44#include "led.h" 43#include "led.h"
@@ -1190,8 +1189,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1190 } 1189 }
1191 rtlhal->last_hmeboxnum = 0; /* h2c */ 1190 rtlhal->last_hmeboxnum = 0; /* h2c */
1192 _rtl92cu_phy_param_tab_init(hw); 1191 _rtl92cu_phy_param_tab_init(hw);
1193 rtl92c_phy_mac_config(hw); 1192 rtl92cu_phy_mac_config(hw);
1194 rtl92c_phy_bb_config(hw); 1193 rtl92cu_phy_bb_config(hw);
1195 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; 1194 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
1196 rtl92c_phy_rf_config(hw); 1195 rtl92c_phy_rf_config(hw);
1197 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && 1196 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
@@ -1203,7 +1202,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1203 RF_CHNLBW, RFREG_OFFSET_MASK); 1202 RF_CHNLBW, RFREG_OFFSET_MASK);
1204 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1, 1203 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
1205 RF_CHNLBW, RFREG_OFFSET_MASK); 1204 RF_CHNLBW, RFREG_OFFSET_MASK);
1206 rtl92c_bb_block_on(hw); 1205 rtl92cu_bb_block_on(hw);
1207 rtl_cam_reset_all_entry(hw); 1206 rtl_cam_reset_all_entry(hw);
1208 rtl92cu_enable_hw_security_config(hw); 1207 rtl92cu_enable_hw_security_config(hw);
1209 ppsc->rfpwr_state = ERFON; 1208 ppsc->rfpwr_state = ERFON;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 3c0ea5ea6db5..62af555bb61c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -30,6 +30,8 @@
30#ifndef __RTL92CU_HW_H__ 30#ifndef __RTL92CU_HW_H__
31#define __RTL92CU_HW_H__ 31#define __RTL92CU_HW_H__
32 32
33#define H2C_RA_MASK 6
34
33#define LLT_POLLING_LLT_THRESHOLD 20 35#define LLT_POLLING_LLT_THRESHOLD 20
34#define LLT_POLLING_READY_TIMEOUT_COUNT 100 36#define LLT_POLLING_READY_TIMEOUT_COUNT 100
35#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 37#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
@@ -103,5 +105,12 @@ void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
103bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); 105bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
104void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); 106void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
105u8 _rtl92c_get_chnl_group(u8 chnl); 107u8 _rtl92c_get_chnl_group(u8 chnl);
108int rtl92c_download_fw(struct ieee80211_hw *hw);
109void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
110void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
111void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
112void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
113 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
114bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
106 115
107#endif 116#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index dc65ef2bbeac..4e020e654e6b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -37,9 +37,7 @@
37#include "dm.h" 37#include "dm.h"
38#include "table.h" 38#include "table.h"
39 39
40#include "../rtl8192c/phy_common.c" 40u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
41
42u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
43 enum radio_path rfpath, u32 regaddr, u32 bitmask) 41 enum radio_path rfpath, u32 regaddr, u32 bitmask)
44{ 42{
45 struct rtl_priv *rtlpriv = rtl_priv(hw); 43 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -65,7 +63,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
65 return readback_value; 63 return readback_value;
66} 64}
67 65
68void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, 66void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
69 enum radio_path rfpath, 67 enum radio_path rfpath,
70 u32 regaddr, u32 bitmask, u32 data) 68 u32 regaddr, u32 bitmask, u32 data)
71{ 69{
@@ -104,20 +102,20 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
104 regaddr, bitmask, data, rfpath)); 102 regaddr, bitmask, data, rfpath));
105} 103}
106 104
107bool rtl92c_phy_mac_config(struct ieee80211_hw *hw) 105bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
108{ 106{
109 bool rtstatus; 107 bool rtstatus;
110 struct rtl_priv *rtlpriv = rtl_priv(hw); 108 struct rtl_priv *rtlpriv = rtl_priv(hw);
111 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 109 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
112 bool is92c = IS_92C_SERIAL(rtlhal->version); 110 bool is92c = IS_92C_SERIAL(rtlhal->version);
113 111
114 rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw); 112 rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw);
115 if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal)) 113 if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
116 rtl_write_byte(rtlpriv, 0x14, 0x71); 114 rtl_write_byte(rtlpriv, 0x14, 0x71);
117 return rtstatus; 115 return rtstatus;
118} 116}
119 117
120bool rtl92c_phy_bb_config(struct ieee80211_hw *hw) 118bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
121{ 119{
122 bool rtstatus = true; 120 bool rtstatus = true;
123 struct rtl_priv *rtlpriv = rtl_priv(hw); 121 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -146,7 +144,7 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
146 return rtstatus; 144 return rtstatus;
147} 145}
148 146
149static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) 147bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
150{ 148{
151 struct rtl_priv *rtlpriv = rtl_priv(hw); 149 struct rtl_priv *rtlpriv = rtl_priv(hw);
152 struct rtl_phy *rtlphy = &(rtlpriv->phy); 150 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -164,7 +162,7 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
164 return true; 162 return true;
165} 163}
166 164
167static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, 165bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
168 u8 configtype) 166 u8 configtype)
169{ 167{
170 int i; 168 int i;
@@ -209,7 +207,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
209 phy_regarray_table[i], 207 phy_regarray_table[i],
210 phy_regarray_table[i + 1])); 208 phy_regarray_table[i + 1]));
211 } 209 }
212 rtl92c_phy_config_bb_external_pa(hw);
213 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { 210 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
214 for (i = 0; i < agctab_arraylen; i = i + 2) { 211 for (i = 0; i < agctab_arraylen; i = i + 2) {
215 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD, 212 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
@@ -225,7 +222,7 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
225 return true; 222 return true;
226} 223}
227 224
228static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, 225bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
229 u8 configtype) 226 u8 configtype)
230{ 227{
231 struct rtl_priv *rtlpriv = rtl_priv(hw); 228 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -263,7 +260,7 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
263 return true; 260 return true;
264} 261}
265 262
266bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 263bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
267 enum radio_path rfpath) 264 enum radio_path rfpath)
268{ 265{
269 int i; 266 int i;
@@ -316,7 +313,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
316 udelay(1); 313 udelay(1);
317 } 314 }
318 } 315 }
319 _rtl92c_phy_config_rf_external_pa(hw, rfpath);
320 break; 316 break;
321 case RF90_PATH_B: 317 case RF90_PATH_B:
322 for (i = 0; i < radiob_arraylen; i = i + 2) { 318 for (i = 0; i < radiob_arraylen; i = i + 2) {
@@ -352,7 +348,7 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
352 return true; 348 return true;
353} 349}
354 350
355void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw) 351void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
356{ 352{
357 struct rtl_priv *rtlpriv = rtl_priv(hw); 353 struct rtl_priv *rtlpriv = rtl_priv(hw);
358 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 354 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -410,12 +406,12 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
410 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw)); 406 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
411 break; 407 break;
412 } 408 }
413 rtl92c_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); 409 rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
414 rtlphy->set_bwmode_inprogress = false; 410 rtlphy->set_bwmode_inprogress = false;
415 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); 411 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
416} 412}
417 413
418void rtl92c_bb_block_on(struct ieee80211_hw *hw) 414void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
419{ 415{
420 struct rtl_priv *rtlpriv = rtl_priv(hw); 416 struct rtl_priv *rtlpriv = rtl_priv(hw);
421 417
@@ -425,7 +421,7 @@ void rtl92c_bb_block_on(struct ieee80211_hw *hw)
425 mutex_unlock(&rtlpriv->io.bb_mutex); 421 mutex_unlock(&rtlpriv->io.bb_mutex);
426} 422}
427 423
428static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) 424void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
429{ 425{
430 u8 tmpreg; 426 u8 tmpreg;
431 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; 427 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
@@ -463,7 +459,7 @@ static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
463 } 459 }
464} 460}
465 461
466static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, 462bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
467 enum rf_pwrstate rfpwr_state) 463 enum rf_pwrstate rfpwr_state)
468{ 464{
469 struct rtl_priv *rtlpriv = rtl_priv(hw); 465 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -584,7 +580,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
584 jiffies_to_msecs(jiffies - 580 jiffies_to_msecs(jiffies -
585 ppsc->last_awake_jiffies))); 581 ppsc->last_awake_jiffies)));
586 ppsc->last_sleep_jiffies = jiffies; 582 ppsc->last_sleep_jiffies = jiffies;
587 _rtl92ce_phy_set_rf_sleep(hw); 583 _rtl92c_phy_set_rf_sleep(hw);
588 break; 584 break;
589 default: 585 default:
590 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 586 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -598,7 +594,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
598 return bresult; 594 return bresult;
599} 595}
600 596
601bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, 597bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
602 enum rf_pwrstate rfpwr_state) 598 enum rf_pwrstate rfpwr_state)
603{ 599{
604 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 600 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -606,6 +602,6 @@ bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
606 602
607 if (rfpwr_state == ppsc->rfpwr_state) 603 if (rfpwr_state == ppsc->rfpwr_state)
608 return bresult; 604 return bresult;
609 bresult = _rtl92ce_phy_set_rf_power_state(hw, rfpwr_state); 605 bresult = _rtl92cu_phy_set_rf_power_state(hw, rfpwr_state);
610 return bresult; 606 return bresult;
611} 607}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
index c456c15afbf1..06299559ab68 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
@@ -29,6 +29,8 @@
29 29
30#include "../rtl8192ce/phy.h" 30#include "../rtl8192ce/phy.h"
31 31
32void rtl92c_bb_block_on(struct ieee80211_hw *hw); 32void rtl92cu_bb_block_on(struct ieee80211_hw *hw);
33bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath); 33bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
34void rtl92c_phy_set_io(struct ieee80211_hw *hw); 34void rtl92c_phy_set_io(struct ieee80211_hw *hw);
35bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
36bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 9149adcc8fa5..1c79c226f145 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -36,7 +36,7 @@
36 36
37static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw); 37static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
38 38
39void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) 39void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
40{ 40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw); 41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 struct rtl_phy *rtlphy = &(rtlpriv->phy); 42 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -61,7 +61,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
61 } 61 }
62} 62}
63 63
64void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 64void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel) 65 u8 *ppowerlevel)
66{ 66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw); 67 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -388,7 +388,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
388 } 388 }
389} 389}
390 390
391void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 391void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
392 u8 *ppowerlevel, u8 channel) 392 u8 *ppowerlevel, u8 channel)
393{ 393{
394 u32 writeVal[2], powerBase0[2], powerBase1[2]; 394 u32 writeVal[2], powerBase0[2], powerBase1[2];
@@ -406,7 +406,7 @@ void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
406 } 406 }
407} 407}
408 408
409bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw) 409bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw)
410{ 410{
411 struct rtl_priv *rtlpriv = rtl_priv(hw); 411 struct rtl_priv *rtlpriv = rtl_priv(hw);
412 struct rtl_phy *rtlphy = &(rtlpriv->phy); 412 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -456,11 +456,11 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
456 udelay(1); 456 udelay(1);
457 switch (rfpath) { 457 switch (rfpath) {
458 case RF90_PATH_A: 458 case RF90_PATH_A:
459 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, 459 rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
460 (enum radio_path) rfpath); 460 (enum radio_path) rfpath);
461 break; 461 break;
462 case RF90_PATH_B: 462 case RF90_PATH_B:
463 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, 463 rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
464 (enum radio_path) rfpath); 464 (enum radio_path) rfpath);
465 break; 465 break;
466 case RF90_PATH_C: 466 case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index c4ed125ef4dc..86c2728cfa00 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -27,4 +27,21 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include "../rtl8192ce/rf.h" 30#ifndef __RTL92CU_RF_H__
31#define __RTL92CU_RF_H__
32
33#define RF6052_MAX_TX_PWR 0x3F
34#define RF6052_MAX_REG 0x3F
35#define RF6052_MAX_PATH 2
36
37extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
38 u8 bandwidth);
39extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
40 u8 *ppowerlevel);
41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
42 u8 *ppowerlevel, u8 channel);
43bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
44bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
45 enum radio_path rfpath);
46
47#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 4e937e0da8e2..71244a38d49e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -36,11 +36,12 @@
36#include "phy.h" 36#include "phy.h"
37#include "mac.h" 37#include "mac.h"
38#include "dm.h" 38#include "dm.h"
39#include "rf.h"
39#include "sw.h" 40#include "sw.h"
40#include "trx.h" 41#include "trx.h"
41#include "led.h" 42#include "led.h"
42#include "hw.h" 43#include "hw.h"
43 44#include <linux/vmalloc.h>
44 45
45MODULE_AUTHOR("Georgia <georgia@realtek.com>"); 46MODULE_AUTHOR("Georgia <georgia@realtek.com>");
46MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>"); 47MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
@@ -106,7 +107,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
106 .switch_channel = rtl92c_phy_sw_chnl, 107 .switch_channel = rtl92c_phy_sw_chnl,
107 .dm_watchdog = rtl92c_dm_watchdog, 108 .dm_watchdog = rtl92c_dm_watchdog,
108 .scan_operation_backup = rtl92c_phy_scan_operation_backup, 109 .scan_operation_backup = rtl92c_phy_scan_operation_backup,
109 .set_rf_power_state = rtl92c_phy_set_rf_power_state, 110 .set_rf_power_state = rtl92cu_phy_set_rf_power_state,
110 .led_control = rtl92cu_led_control, 111 .led_control = rtl92cu_led_control,
111 .enable_hw_sec = rtl92cu_enable_hw_security_config, 112 .enable_hw_sec = rtl92cu_enable_hw_security_config,
112 .set_key = rtl92c_set_key, 113 .set_key = rtl92c_set_key,
@@ -114,8 +115,16 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
114 .deinit_sw_leds = rtl92cu_deinit_sw_leds, 115 .deinit_sw_leds = rtl92cu_deinit_sw_leds,
115 .get_bbreg = rtl92c_phy_query_bb_reg, 116 .get_bbreg = rtl92c_phy_query_bb_reg,
116 .set_bbreg = rtl92c_phy_set_bb_reg, 117 .set_bbreg = rtl92c_phy_set_bb_reg,
117 .get_rfreg = rtl92c_phy_query_rf_reg, 118 .get_rfreg = rtl92cu_phy_query_rf_reg,
118 .set_rfreg = rtl92c_phy_set_rf_reg, 119 .set_rfreg = rtl92cu_phy_set_rf_reg,
120 .phy_rf6052_config = rtl92cu_phy_rf6052_config,
121 .phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower,
122 .phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower,
123 .config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile,
124 .config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile,
125 .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
126 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
127 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
119}; 128};
120 129
121static struct rtl_mod_params rtl92cu_mod_params = { 130static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index 3b2c66339554..43b1177924ab 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -32,4 +32,22 @@
32 32
33#define EFUSE_MAX_SECTION 16 33#define EFUSE_MAX_SECTION 16
34 34
35void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
36 u8 *powerlevel);
37void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
38 u8 *ppowerlevel, u8 channel);
39bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
40 u8 configtype);
41bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
42 u8 configtype);
43void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
44void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
45 enum radio_path rfpath,
46 u32 regaddr, u32 bitmask, u32 data);
47bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
48 enum rf_pwrstate rfpwr_state);
49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
50 enum radio_path rfpath, u32 regaddr, u32 bitmask);
51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
52
35#endif 53#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 9855c3e0a4b2..d0b0d43b9a6d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -334,7 +334,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
334 rx_status->flag |= RX_FLAG_40MHZ; 334 rx_status->flag |= RX_FLAG_40MHZ;
335 if (GET_RX_DESC_RX_HT(pdesc)) 335 if (GET_RX_DESC_RX_HT(pdesc))
336 rx_status->flag |= RX_FLAG_HT; 336 rx_status->flag |= RX_FLAG_HT;
337 rx_status->flag |= RX_FLAG_TSFT; 337 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
338 if (stats->decrypted) 338 if (stats->decrypted)
339 rx_status->flag |= RX_FLAG_DECRYPTED; 339 rx_status->flag |= RX_FLAG_DECRYPTED;
340 rx_status->rate_idx = _rtl92c_rate_mapping(hw, 340 rx_status->rate_idx = _rtl92c_rate_mapping(hw,
@@ -504,7 +504,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
504 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 504 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
505 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 505 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
506 bool defaultadapter = true; 506 bool defaultadapter = true;
507 struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid); 507 struct ieee80211_sta *sta;
508 struct rtl_tcb_desc tcb_desc; 508 struct rtl_tcb_desc tcb_desc;
509 u8 *qc = ieee80211_get_qos_ctl(hdr); 509 u8 *qc = ieee80211_get_qos_ctl(hdr);
510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
@@ -562,10 +562,13 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
562 SET_TX_DESC_DATA_BW(txdesc, 0); 562 SET_TX_DESC_DATA_BW(txdesc, 0);
563 SET_TX_DESC_DATA_SC(txdesc, 0); 563 SET_TX_DESC_DATA_SC(txdesc, 0);
564 } 564 }
565 rcu_read_lock();
566 sta = ieee80211_find_sta(mac->vif, mac->bssid);
565 if (sta) { 567 if (sta) {
566 u8 ampdu_density = sta->ht_cap.ampdu_density; 568 u8 ampdu_density = sta->ht_cap.ampdu_density;
567 SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density); 569 SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
568 } 570 }
571 rcu_read_unlock();
569 if (info->control.hw_key) { 572 if (info->control.hw_key) {
570 struct ieee80211_key_conf *keyconf = info->control.hw_key; 573 struct ieee80211_key_conf *keyconf = info->control.hw_key;
571 switch (keyconf->cipher) { 574 switch (keyconf->cipher) {
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 7d47184d6bfe..01226f8e70f9 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1384,6 +1384,18 @@ struct rtl_hal_ops {
1384 u32 regaddr, u32 bitmask); 1384 u32 regaddr, u32 bitmask);
1385 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1385 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1386 u32 regaddr, u32 bitmask, u32 data); 1386 u32 regaddr, u32 bitmask, u32 data);
1387 bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
1388 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
1389 u8 *powerlevel);
1390 void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw,
1391 u8 *ppowerlevel, u8 channel);
1392 bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw,
1393 u8 configtype);
1394 bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw,
1395 u8 configtype);
1396 void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
1397 void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
1398 void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
1387}; 1399};
1388 1400
1389struct rtl_intf_ops { 1401struct rtl_intf_ops {
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 5a1c13878eaf..12c9e635a6d6 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -375,7 +375,7 @@ out:
375 mutex_unlock(&wl->mutex); 375 mutex_unlock(&wl->mutex);
376} 376}
377 377
378static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 378static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
379{ 379{
380 struct wl1251 *wl = hw->priv; 380 struct wl1251 *wl = hw->priv;
381 unsigned long flags; 381 unsigned long flags;
@@ -401,8 +401,6 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
401 wl->tx_queue_stopped = true; 401 wl->tx_queue_stopped = true;
402 spin_unlock_irqrestore(&wl->wl_lock, flags); 402 spin_unlock_irqrestore(&wl->wl_lock, flags);
403 } 403 }
404
405 return NETDEV_TX_OK;
406} 404}
407 405
408static int wl1251_op_start(struct ieee80211_hw *hw) 406static int wl1251_op_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c
index b659e15c78df..c1b3b3f03da2 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -81,7 +81,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
81 status->freq = ieee80211_channel_to_frequency(desc->channel, 81 status->freq = ieee80211_channel_to_frequency(desc->channel,
82 status->band); 82 status->band);
83 83
84 status->flag |= RX_FLAG_TSFT; 84 status->flag |= RX_FLAG_MACTIME_MPDU;
85 85
86 if (desc->flags & RX_DESC_ENCRYPTION_MASK) { 86 if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
87 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 87 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index 33840d95d17d..3badc6bb7866 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -1328,10 +1328,9 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1328 /* get data from A-MPDU parameters field */ 1328 /* get data from A-MPDU parameters field */
1329 acx->ampdu_max_length = ht_cap->ampdu_factor; 1329 acx->ampdu_max_length = ht_cap->ampdu_factor;
1330 acx->ampdu_min_spacing = ht_cap->ampdu_density; 1330 acx->ampdu_min_spacing = ht_cap->ampdu_density;
1331
1332 memcpy(acx->mac_address, mac_address, ETH_ALEN);
1333 } 1331 }
1334 1332
1333 memcpy(acx->mac_address, mac_address, ETH_ALEN);
1335 acx->ht_capabilites = cpu_to_le32(ht_capabilites); 1334 acx->ht_capabilites = cpu_to_le32(ht_capabilites);
1336 1335
1337 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx)); 1336 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
@@ -1542,3 +1541,28 @@ out:
1542 kfree(config_ps); 1541 kfree(config_ps);
1543 return ret; 1542 return ret;
1544} 1543}
1544
1545int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
1546{
1547 struct wl1271_acx_inconnection_sta *acx = NULL;
1548 int ret;
1549
1550 wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr);
1551
1552 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1553 if (!acx)
1554 return -ENOMEM;
1555
1556 memcpy(acx->addr, addr, ETH_ALEN);
1557
1558 ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
1559 acx, sizeof(*acx));
1560 if (ret < 0) {
1561 wl1271_warning("acx set inconnaction sta failed: %d", ret);
1562 goto out;
1563 }
1564
1565out:
1566 kfree(acx);
1567 return ret;
1568}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 4e301de916bb..dd19b01d807b 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -1155,6 +1155,13 @@ struct wl1271_acx_config_ps {
1155 __le32 null_data_rate; 1155 __le32 null_data_rate;
1156} __packed; 1156} __packed;
1157 1157
1158struct wl1271_acx_inconnection_sta {
1159 struct acx_header header;
1160
1161 u8 addr[ETH_ALEN];
1162 u8 padding1[2];
1163} __packed;
1164
1158enum { 1165enum {
1159 ACX_WAKE_UP_CONDITIONS = 0x0002, 1166 ACX_WAKE_UP_CONDITIONS = 0x0002,
1160 ACX_MEM_CFG = 0x0003, 1167 ACX_MEM_CFG = 0x0003,
@@ -1215,6 +1222,7 @@ enum {
1215 ACX_GEN_FW_CMD = 0x0070, 1222 ACX_GEN_FW_CMD = 0x0070,
1216 ACX_HOST_IF_CFG_BITMAP = 0x0071, 1223 ACX_HOST_IF_CFG_BITMAP = 0x0071,
1217 ACX_MAX_TX_FAILURE = 0x0072, 1224 ACX_MAX_TX_FAILURE = 0x0072,
1225 ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
1218 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1226 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1219 DOT11_CUR_TX_PWR = 0x100D, 1227 DOT11_CUR_TX_PWR = 0x100D,
1220 DOT11_RX_DOT11_MODE = 0x1012, 1228 DOT11_RX_DOT11_MODE = 0x1012,
@@ -1290,5 +1298,6 @@ int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
1290int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); 1298int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1291int wl1271_acx_max_tx_retry(struct wl1271 *wl); 1299int wl1271_acx_max_tx_retry(struct wl1271 *wl);
1292int wl1271_acx_config_ps(struct wl1271 *wl); 1300int wl1271_acx_config_ps(struct wl1271 *wl);
1301int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1293 1302
1294#endif /* __WL1271_ACX_H__ */ 1303#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 62dc9839dd31..6072fe457135 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -483,7 +483,7 @@ static void wl1271_check_ba_support(struct wl1271 *wl)
483static int wl1271_set_ba_policies(struct wl1271 *wl) 483static int wl1271_set_ba_policies(struct wl1271 *wl)
484{ 484{
485 u8 tid_index; 485 u8 tid_index;
486 u8 ret = 0; 486 int ret = 0;
487 487
488 /* Reset the BA RX indicators */ 488 /* Reset the BA RX indicators */
489 wl->ba_rx_bitmap = 0; 489 wl->ba_rx_bitmap = 0;
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 61dea73f5fdc..947491a1d9cc 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -482,6 +482,10 @@ static int wl1271_plt_init(struct wl1271 *wl)
482 if (ret < 0) 482 if (ret < 0)
483 goto out_free_memmap; 483 goto out_free_memmap;
484 484
485 ret = wl1271_acx_sta_mem_cfg(wl);
486 if (ret < 0)
487 goto out_free_memmap;
488
485 /* Default fragmentation threshold */ 489 /* Default fragmentation threshold */
486 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); 490 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
487 if (ret < 0) 491 if (ret < 0)
@@ -533,6 +537,57 @@ static int wl1271_plt_init(struct wl1271 *wl)
533 return ret; 537 return ret;
534} 538}
535 539
540static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
541{
542 bool fw_ps;
543
544 /* only regulate station links */
545 if (hlid < WL1271_AP_STA_HLID_START)
546 return;
547
548 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
549
550 /*
551 * Wake up from high level PS if the STA is asleep with too little
552 * blocks in FW or if the STA is awake.
553 */
554 if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
555 wl1271_ps_link_end(wl, hlid);
556
557 /* Start high-level PS if the STA is asleep with enough blocks in FW */
558 else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
559 wl1271_ps_link_start(wl, hlid, true);
560}
561
562static void wl1271_irq_update_links_status(struct wl1271 *wl,
563 struct wl1271_fw_ap_status *status)
564{
565 u32 cur_fw_ps_map;
566 u8 hlid;
567
568 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
569 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
570 wl1271_debug(DEBUG_PSM,
571 "link ps prev 0x%x cur 0x%x changed 0x%x",
572 wl->ap_fw_ps_map, cur_fw_ps_map,
573 wl->ap_fw_ps_map ^ cur_fw_ps_map);
574
575 wl->ap_fw_ps_map = cur_fw_ps_map;
576 }
577
578 for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
579 u8 cnt = status->tx_lnk_free_blks[hlid] -
580 wl->links[hlid].prev_freed_blks;
581
582 wl->links[hlid].prev_freed_blks =
583 status->tx_lnk_free_blks[hlid];
584 wl->links[hlid].allocated_blks -= cnt;
585
586 wl1271_irq_ps_regulate_link(wl, hlid,
587 wl->links[hlid].allocated_blks);
588 }
589}
590
536static void wl1271_fw_status(struct wl1271 *wl, 591static void wl1271_fw_status(struct wl1271 *wl,
537 struct wl1271_fw_full_status *full_status) 592 struct wl1271_fw_full_status *full_status)
538{ 593{
@@ -570,6 +625,10 @@ static void wl1271_fw_status(struct wl1271 *wl,
570 if (total) 625 if (total)
571 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 626 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
572 627
628 /* for AP update num of allocated TX blocks per link and ps status */
629 if (wl->bss_type == BSS_TYPE_AP_BSS)
630 wl1271_irq_update_links_status(wl, &full_status->ap);
631
573 /* update the host-chipset time offset */ 632 /* update the host-chipset time offset */
574 getnstimeofday(&ts); 633 getnstimeofday(&ts);
575 wl->time_offset = (timespec_to_ns(&ts) >> 10) - 634 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
@@ -975,19 +1034,37 @@ int wl1271_plt_stop(struct wl1271 *wl)
975 return ret; 1034 return ret;
976} 1035}
977 1036
978static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1037static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
979{ 1038{
980 struct wl1271 *wl = hw->priv; 1039 struct wl1271 *wl = hw->priv;
981 unsigned long flags; 1040 unsigned long flags;
982 int q; 1041 int q;
1042 u8 hlid = 0;
983 1043
984 spin_lock_irqsave(&wl->wl_lock, flags); 1044 spin_lock_irqsave(&wl->wl_lock, flags);
985 wl->tx_queue_count++; 1045 wl->tx_queue_count++;
1046
1047 /*
1048 * The workqueue is slow to process the tx_queue and we need stop
1049 * the queue here, otherwise the queue will get too long.
1050 */
1051 if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1052 wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
1053 ieee80211_stop_queues(wl->hw);
1054 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
1055 }
1056
986 spin_unlock_irqrestore(&wl->wl_lock, flags); 1057 spin_unlock_irqrestore(&wl->wl_lock, flags);
987 1058
988 /* queue the packet */ 1059 /* queue the packet */
989 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 1060 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
990 skb_queue_tail(&wl->tx_queue[q], skb); 1061 if (wl->bss_type == BSS_TYPE_AP_BSS) {
1062 hlid = wl1271_tx_get_hlid(skb);
1063 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
1064 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1065 } else {
1066 skb_queue_tail(&wl->tx_queue[q], skb);
1067 }
991 1068
992 /* 1069 /*
993 * The chip specific setup must run before the first TX packet - 1070 * The chip specific setup must run before the first TX packet -
@@ -996,21 +1073,6 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
996 1073
997 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) 1074 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
998 ieee80211_queue_work(wl->hw, &wl->tx_work); 1075 ieee80211_queue_work(wl->hw, &wl->tx_work);
999
1000 /*
1001 * The workqueue is slow to process the tx_queue and we need stop
1002 * the queue here, otherwise the queue will get too long.
1003 */
1004 if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1005 wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
1006
1007 spin_lock_irqsave(&wl->wl_lock, flags);
1008 ieee80211_stop_queues(wl->hw);
1009 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
1010 spin_unlock_irqrestore(&wl->wl_lock, flags);
1011 }
1012
1013 return NETDEV_TX_OK;
1014} 1076}
1015 1077
1016static struct notifier_block wl1271_dev_notifier = { 1078static struct notifier_block wl1271_dev_notifier = {
@@ -1221,6 +1283,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1221 wl->filters = 0; 1283 wl->filters = 0;
1222 wl1271_free_ap_keys(wl); 1284 wl1271_free_ap_keys(wl);
1223 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map)); 1285 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
1286 wl->ap_fw_ps_map = 0;
1287 wl->ap_ps_map = 0;
1224 1288
1225 for (i = 0; i < NUM_TX_QUEUES; i++) 1289 for (i = 0; i < NUM_TX_QUEUES; i++)
1226 wl->tx_blocks_freed[i] = 0; 1290 wl->tx_blocks_freed[i] = 0;
@@ -2218,6 +2282,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
2218 u32 sta_rate_set = 0; 2282 u32 sta_rate_set = 0;
2219 int ret; 2283 int ret;
2220 struct ieee80211_sta *sta; 2284 struct ieee80211_sta *sta;
2285 bool sta_exists = false;
2286 struct ieee80211_sta_ht_cap sta_ht_cap;
2221 2287
2222 if (is_ibss) { 2288 if (is_ibss) {
2223 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, 2289 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
@@ -2289,16 +2355,20 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
2289 if (sta->ht_cap.ht_supported) 2355 if (sta->ht_cap.ht_supported)
2290 sta_rate_set |= 2356 sta_rate_set |=
2291 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET); 2357 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
2358 sta_ht_cap = sta->ht_cap;
2359 sta_exists = true;
2360 }
2361 rcu_read_unlock();
2292 2362
2363 if (sta_exists) {
2293 /* handle new association with HT and HT information change */ 2364 /* handle new association with HT and HT information change */
2294 if ((changed & BSS_CHANGED_HT) && 2365 if ((changed & BSS_CHANGED_HT) &&
2295 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { 2366 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
2296 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, 2367 ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
2297 true); 2368 true);
2298 if (ret < 0) { 2369 if (ret < 0) {
2299 wl1271_warning("Set ht cap true failed %d", 2370 wl1271_warning("Set ht cap true failed %d",
2300 ret); 2371 ret);
2301 rcu_read_unlock();
2302 goto out; 2372 goto out;
2303 } 2373 }
2304 ret = wl1271_acx_set_ht_information(wl, 2374 ret = wl1271_acx_set_ht_information(wl,
@@ -2306,23 +2376,20 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
2306 if (ret < 0) { 2376 if (ret < 0) {
2307 wl1271_warning("Set ht information failed %d", 2377 wl1271_warning("Set ht information failed %d",
2308 ret); 2378 ret);
2309 rcu_read_unlock();
2310 goto out; 2379 goto out;
2311 } 2380 }
2312 } 2381 }
2313 /* handle new association without HT and disassociation */ 2382 /* handle new association without HT and disassociation */
2314 else if (changed & BSS_CHANGED_ASSOC) { 2383 else if (changed & BSS_CHANGED_ASSOC) {
2315 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, 2384 ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
2316 false); 2385 false);
2317 if (ret < 0) { 2386 if (ret < 0) {
2318 wl1271_warning("Set ht cap false failed %d", 2387 wl1271_warning("Set ht cap false failed %d",
2319 ret); 2388 ret);
2320 rcu_read_unlock();
2321 goto out; 2389 goto out;
2322 } 2390 }
2323 } 2391 }
2324 } 2392 }
2325 rcu_read_unlock();
2326 2393
2327 if ((changed & BSS_CHANGED_ASSOC)) { 2394 if ((changed & BSS_CHANGED_ASSOC)) {
2328 if (bss_conf->assoc) { 2395 if (bss_conf->assoc) {
@@ -2612,7 +2679,7 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
2612 return 0; 2679 return 0;
2613} 2680}
2614 2681
2615static int wl1271_allocate_hlid(struct wl1271 *wl, 2682static int wl1271_allocate_sta(struct wl1271 *wl,
2616 struct ieee80211_sta *sta, 2683 struct ieee80211_sta *sta,
2617 u8 *hlid) 2684 u8 *hlid)
2618{ 2685{
@@ -2626,18 +2693,25 @@ static int wl1271_allocate_hlid(struct wl1271 *wl,
2626 } 2693 }
2627 2694
2628 wl_sta = (struct wl1271_station *)sta->drv_priv; 2695 wl_sta = (struct wl1271_station *)sta->drv_priv;
2629
2630 __set_bit(id, wl->ap_hlid_map); 2696 __set_bit(id, wl->ap_hlid_map);
2631 wl_sta->hlid = WL1271_AP_STA_HLID_START + id; 2697 wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
2632 *hlid = wl_sta->hlid; 2698 *hlid = wl_sta->hlid;
2699 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
2633 return 0; 2700 return 0;
2634} 2701}
2635 2702
2636static void wl1271_free_hlid(struct wl1271 *wl, u8 hlid) 2703static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
2637{ 2704{
2638 int id = hlid - WL1271_AP_STA_HLID_START; 2705 int id = hlid - WL1271_AP_STA_HLID_START;
2639 2706
2707 if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
2708 return;
2709
2640 __clear_bit(id, wl->ap_hlid_map); 2710 __clear_bit(id, wl->ap_hlid_map);
2711 memset(wl->links[hlid].addr, 0, ETH_ALEN);
2712 wl1271_tx_reset_link_queues(wl, hlid);
2713 __clear_bit(hlid, &wl->ap_ps_map);
2714 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
2641} 2715}
2642 2716
2643static int wl1271_op_sta_add(struct ieee80211_hw *hw, 2717static int wl1271_op_sta_add(struct ieee80211_hw *hw,
@@ -2658,13 +2732,13 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
2658 2732
2659 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); 2733 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
2660 2734
2661 ret = wl1271_allocate_hlid(wl, sta, &hlid); 2735 ret = wl1271_allocate_sta(wl, sta, &hlid);
2662 if (ret < 0) 2736 if (ret < 0)
2663 goto out; 2737 goto out;
2664 2738
2665 ret = wl1271_ps_elp_wakeup(wl, false); 2739 ret = wl1271_ps_elp_wakeup(wl, false);
2666 if (ret < 0) 2740 if (ret < 0)
2667 goto out; 2741 goto out_free_sta;
2668 2742
2669 ret = wl1271_cmd_add_sta(wl, sta, hlid); 2743 ret = wl1271_cmd_add_sta(wl, sta, hlid);
2670 if (ret < 0) 2744 if (ret < 0)
@@ -2673,6 +2747,10 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
2673out_sleep: 2747out_sleep:
2674 wl1271_ps_elp_sleep(wl); 2748 wl1271_ps_elp_sleep(wl);
2675 2749
2750out_free_sta:
2751 if (ret < 0)
2752 wl1271_free_sta(wl, hlid);
2753
2676out: 2754out:
2677 mutex_unlock(&wl->mutex); 2755 mutex_unlock(&wl->mutex);
2678 return ret; 2756 return ret;
@@ -2709,7 +2787,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
2709 if (ret < 0) 2787 if (ret < 0)
2710 goto out_sleep; 2788 goto out_sleep;
2711 2789
2712 wl1271_free_hlid(wl, wl_sta->hlid); 2790 wl1271_free_sta(wl, wl_sta->hlid);
2713 2791
2714out_sleep: 2792out_sleep:
2715 wl1271_ps_elp_sleep(wl); 2793 wl1271_ps_elp_sleep(wl);
@@ -3212,7 +3290,9 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
3212 IEEE80211_HW_SUPPORTS_UAPSD | 3290 IEEE80211_HW_SUPPORTS_UAPSD |
3213 IEEE80211_HW_HAS_RATE_CONTROL | 3291 IEEE80211_HW_HAS_RATE_CONTROL |
3214 IEEE80211_HW_CONNECTION_MONITOR | 3292 IEEE80211_HW_CONNECTION_MONITOR |
3215 IEEE80211_HW_SUPPORTS_CQM_RSSI; 3293 IEEE80211_HW_SUPPORTS_CQM_RSSI |
3294 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
3295 IEEE80211_HW_AP_LINK_PS;
3216 3296
3217 wl->hw->wiphy->cipher_suites = cipher_suites; 3297 wl->hw->wiphy->cipher_suites = cipher_suites;
3218 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 3298 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -3264,7 +3344,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
3264 struct ieee80211_hw *hw; 3344 struct ieee80211_hw *hw;
3265 struct platform_device *plat_dev = NULL; 3345 struct platform_device *plat_dev = NULL;
3266 struct wl1271 *wl; 3346 struct wl1271 *wl;
3267 int i, ret; 3347 int i, j, ret;
3268 unsigned int order; 3348 unsigned int order;
3269 3349
3270 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 3350 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
@@ -3292,6 +3372,10 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
3292 for (i = 0; i < NUM_TX_QUEUES; i++) 3372 for (i = 0; i < NUM_TX_QUEUES; i++)
3293 skb_queue_head_init(&wl->tx_queue[i]); 3373 skb_queue_head_init(&wl->tx_queue[i]);
3294 3374
3375 for (i = 0; i < NUM_TX_QUEUES; i++)
3376 for (j = 0; j < AP_MAX_LINKS; j++)
3377 skb_queue_head_init(&wl->links[j].tx_queue[i]);
3378
3295 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 3379 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
3296 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); 3380 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
3297 INIT_WORK(&wl->irq_work, wl1271_irq_work); 3381 INIT_WORK(&wl->irq_work, wl1271_irq_work);
@@ -3317,6 +3401,9 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
3317 wl->bss_type = MAX_BSS_TYPE; 3401 wl->bss_type = MAX_BSS_TYPE;
3318 wl->set_bss_type = MAX_BSS_TYPE; 3402 wl->set_bss_type = MAX_BSS_TYPE;
3319 wl->fw_bss_type = MAX_BSS_TYPE; 3403 wl->fw_bss_type = MAX_BSS_TYPE;
3404 wl->last_tx_hlid = 0;
3405 wl->ap_ps_map = 0;
3406 wl->ap_fw_ps_map = 0;
3320 3407
3321 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 3408 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
3322 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 3409 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -3412,5 +3499,5 @@ module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
3412MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); 3499MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
3413 3500
3414MODULE_LICENSE("GPL"); 3501MODULE_LICENSE("GPL");
3415MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 3502MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
3416MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 3503MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 2d3086ae6338..5c347b1bd17f 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -24,6 +24,7 @@
24#include "reg.h" 24#include "reg.h"
25#include "ps.h" 25#include "ps.h"
26#include "io.h" 26#include "io.h"
27#include "tx.h"
27 28
28#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
29 30
@@ -173,4 +174,81 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
173 return ret; 174 return ret;
174} 175}
175 176
177static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
178{
179 int i, filtered = 0;
180 struct sk_buff *skb;
181 struct ieee80211_tx_info *info;
182 unsigned long flags;
183
184 /* filter all frames currently the low level queus for this hlid */
185 for (i = 0; i < NUM_TX_QUEUES; i++) {
186 while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
187 info = IEEE80211_SKB_CB(skb);
188 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
189 info->status.rates[0].idx = -1;
190 ieee80211_tx_status(wl->hw, skb);
191 filtered++;
192 }
193 }
194
195 spin_lock_irqsave(&wl->wl_lock, flags);
196 wl->tx_queue_count -= filtered;
197 spin_unlock_irqrestore(&wl->wl_lock, flags);
198
199 wl1271_handle_tx_low_watermark(wl);
200}
201
202void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
203{
204 struct ieee80211_sta *sta;
205
206 if (test_bit(hlid, &wl->ap_ps_map))
207 return;
208
209 wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
210 "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
211 clean_queues);
212
213 rcu_read_lock();
214 sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
215 if (!sta) {
216 wl1271_error("could not find sta %pM for starting ps",
217 wl->links[hlid].addr);
218 rcu_read_unlock();
219 return;
220 }
176 221
222 ieee80211_sta_ps_transition_ni(sta, true);
223 rcu_read_unlock();
224
225 /* do we want to filter all frames from this link's queues? */
226 if (clean_queues)
227 wl1271_ps_filter_frames(wl, hlid);
228
229 __set_bit(hlid, &wl->ap_ps_map);
230}
231
232void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
233{
234 struct ieee80211_sta *sta;
235
236 if (!test_bit(hlid, &wl->ap_ps_map))
237 return;
238
239 wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
240
241 __clear_bit(hlid, &wl->ap_ps_map);
242
243 rcu_read_lock();
244 sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
245 if (!sta) {
246 wl1271_error("could not find sta %pM for ending ps",
247 wl->links[hlid].addr);
248 goto end;
249 }
250
251 ieee80211_sta_ps_transition_ni(sta, false);
252end:
253 rcu_read_unlock();
254}
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 8415060f08e5..fc1f4c193593 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -32,5 +32,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
32void wl1271_ps_elp_sleep(struct wl1271 *wl); 32void wl1271_ps_elp_sleep(struct wl1271 *wl);
33int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 33int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
34void wl1271_elp_work(struct work_struct *work); 34void wl1271_elp_work(struct work_struct *work);
35void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
36void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
35 37
36#endif /* __WL1271_PS_H__ */ 38#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 00d250d8da18..3d13d7a83ea1 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -92,7 +92,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
92{ 92{
93 struct wl1271_rx_descriptor *desc; 93 struct wl1271_rx_descriptor *desc;
94 struct sk_buff *skb; 94 struct sk_buff *skb;
95 u16 *fc; 95 struct ieee80211_hdr *hdr;
96 u8 *buf; 96 u8 *buf;
97 u8 beacon = 0; 97 u8 beacon = 0;
98 98
@@ -118,8 +118,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
118 /* now we pull the descriptor out of the buffer */ 118 /* now we pull the descriptor out of the buffer */
119 skb_pull(skb, sizeof(*desc)); 119 skb_pull(skb, sizeof(*desc));
120 120
121 fc = (u16 *)skb->data; 121 hdr = (struct ieee80211_hdr *)skb->data;
122 if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) 122 if (ieee80211_is_beacon(hdr->frame_control))
123 beacon = 1; 123 beacon = 1;
124 124
125 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 125 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 4cef8fa3dee1..75fabf836491 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -30,10 +30,6 @@
30#define WL1271_RX_MAX_RSSI -30 30#define WL1271_RX_MAX_RSSI -30
31#define WL1271_RX_MIN_RSSI -95 31#define WL1271_RX_MIN_RSSI -95
32 32
33#define WL1271_RX_ALIGN_TO 4
34#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
35 ~(WL1271_RX_ALIGN_TO - 1))
36
37#define SHORT_PREAMBLE_BIT BIT(0) 33#define SHORT_PREAMBLE_BIT BIT(0)
38#define OFDM_RATE_BIT BIT(6) 34#define OFDM_RATE_BIT BIT(6)
39#define PBCC_RATE_BIT BIT(7) 35#define PBCC_RATE_BIT BIT(7)
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 67a00946e3dd..ac60d577319f 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -70,8 +70,65 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
70 } 70 }
71} 71}
72 72
73static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
74 struct sk_buff *skb)
75{
76 struct ieee80211_hdr *hdr;
77
78 /*
79 * add the station to the known list before transmitting the
80 * authentication response. this way it won't get de-authed by FW
81 * when transmitting too soon.
82 */
83 hdr = (struct ieee80211_hdr *)(skb->data +
84 sizeof(struct wl1271_tx_hw_descr));
85 if (ieee80211_is_auth(hdr->frame_control))
86 wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
87}
88
89static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
90{
91 bool fw_ps;
92 u8 tx_blks;
93
94 /* only regulate station links */
95 if (hlid < WL1271_AP_STA_HLID_START)
96 return;
97
98 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
99 tx_blks = wl->links[hlid].allocated_blks;
100
101 /*
102 * if in FW PS and there is enough data in FW we can put the link
103 * into high-level PS and clean out its TX queues.
104 */
105 if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
106 wl1271_ps_link_start(wl, hlid, true);
107}
108
109u8 wl1271_tx_get_hlid(struct sk_buff *skb)
110{
111 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
112
113 if (control->control.sta) {
114 struct wl1271_station *wl_sta;
115
116 wl_sta = (struct wl1271_station *)
117 control->control.sta->drv_priv;
118 return wl_sta->hlid;
119 } else {
120 struct ieee80211_hdr *hdr;
121
122 hdr = (struct ieee80211_hdr *)skb->data;
123 if (ieee80211_is_mgmt(hdr->frame_control))
124 return WL1271_AP_GLOBAL_HLID;
125 else
126 return WL1271_AP_BROADCAST_HLID;
127 }
128}
129
73static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, 130static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
74 u32 buf_offset) 131 u32 buf_offset, u8 hlid)
75{ 132{
76 struct wl1271_tx_hw_descr *desc; 133 struct wl1271_tx_hw_descr *desc;
77 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 134 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -100,6 +157,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
100 157
101 wl->tx_blocks_available -= total_blocks; 158 wl->tx_blocks_available -= total_blocks;
102 159
160 if (wl->bss_type == BSS_TYPE_AP_BSS)
161 wl->links[hlid].allocated_blks += total_blocks;
162
103 ret = 0; 163 ret = 0;
104 164
105 wl1271_debug(DEBUG_TX, 165 wl1271_debug(DEBUG_TX,
@@ -113,7 +173,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
113} 173}
114 174
115static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 175static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
116 u32 extra, struct ieee80211_tx_info *control) 176 u32 extra, struct ieee80211_tx_info *control,
177 u8 hlid)
117{ 178{
118 struct timespec ts; 179 struct timespec ts;
119 struct wl1271_tx_hw_descr *desc; 180 struct wl1271_tx_hw_descr *desc;
@@ -149,7 +210,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
149 desc->tid = ac; 210 desc->tid = ac;
150 211
151 if (wl->bss_type != BSS_TYPE_AP_BSS) { 212 if (wl->bss_type != BSS_TYPE_AP_BSS) {
152 desc->aid = TX_HW_DEFAULT_AID; 213 desc->aid = hlid;
153 214
154 /* if the packets are destined for AP (have a STA entry) 215 /* if the packets are destined for AP (have a STA entry)
155 send them with AP rate policies, otherwise use default 216 send them with AP rate policies, otherwise use default
@@ -159,25 +220,17 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
159 else 220 else
160 rate_idx = ACX_TX_BASIC_RATE; 221 rate_idx = ACX_TX_BASIC_RATE;
161 } else { 222 } else {
162 if (control->control.sta) { 223 desc->hlid = hlid;
163 struct wl1271_station *wl_sta; 224 switch (hlid) {
164 225 case WL1271_AP_GLOBAL_HLID:
165 wl_sta = (struct wl1271_station *) 226 rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
166 control->control.sta->drv_priv; 227 break;
167 desc->hlid = wl_sta->hlid; 228 case WL1271_AP_BROADCAST_HLID:
229 rate_idx = ACX_TX_AP_MODE_BCST_RATE;
230 break;
231 default:
168 rate_idx = ac; 232 rate_idx = ac;
169 } else { 233 break;
170 struct ieee80211_hdr *hdr;
171
172 hdr = (struct ieee80211_hdr *)
173 (skb->data + sizeof(*desc));
174 if (ieee80211_is_mgmt(hdr->frame_control)) {
175 desc->hlid = WL1271_AP_GLOBAL_HLID;
176 rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
177 } else {
178 desc->hlid = WL1271_AP_BROADCAST_HLID;
179 rate_idx = ACX_TX_AP_MODE_BCST_RATE;
180 }
181 } 234 }
182 } 235 }
183 236
@@ -185,7 +238,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
185 desc->reserved = 0; 238 desc->reserved = 0;
186 239
187 /* align the length (and store in terms of words) */ 240 /* align the length (and store in terms of words) */
188 pad = WL1271_TX_ALIGN(skb->len); 241 pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
189 desc->length = cpu_to_le16(pad >> 2); 242 desc->length = cpu_to_le16(pad >> 2);
190 243
191 /* calculate number of padding bytes */ 244 /* calculate number of padding bytes */
@@ -208,6 +261,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
208 u32 extra = 0; 261 u32 extra = 0;
209 int ret = 0; 262 int ret = 0;
210 u32 total_len; 263 u32 total_len;
264 u8 hlid;
211 265
212 if (!skb) 266 if (!skb)
213 return -EINVAL; 267 return -EINVAL;
@@ -234,18 +288,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
234 } 288 }
235 } 289 }
236 290
237 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset); 291 if (wl->bss_type == BSS_TYPE_AP_BSS)
292 hlid = wl1271_tx_get_hlid(skb);
293 else
294 hlid = TX_HW_DEFAULT_AID;
295
296 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
238 if (ret < 0) 297 if (ret < 0)
239 return ret; 298 return ret;
240 299
241 wl1271_tx_fill_hdr(wl, skb, extra, info); 300 if (wl->bss_type == BSS_TYPE_AP_BSS) {
301 wl1271_tx_ap_update_inconnection_sta(wl, skb);
302 wl1271_tx_regulate_link(wl, hlid);
303 }
304
305 wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
242 306
243 /* 307 /*
244 * The length of each packet is stored in terms of words. Thus, we must 308 * The length of each packet is stored in terms of words. Thus, we must
245 * pad the skb data to make sure its length is aligned. 309 * pad the skb data to make sure its length is aligned.
246 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr 310 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
247 */ 311 */
248 total_len = WL1271_TX_ALIGN(skb->len); 312 total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
249 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); 313 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
250 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 314 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
251 315
@@ -279,7 +343,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
279 return enabled_rates; 343 return enabled_rates;
280} 344}
281 345
282static void handle_tx_low_watermark(struct wl1271 *wl) 346void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
283{ 347{
284 unsigned long flags; 348 unsigned long flags;
285 349
@@ -293,7 +357,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
293 } 357 }
294} 358}
295 359
296static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 360static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
297{ 361{
298 struct sk_buff *skb = NULL; 362 struct sk_buff *skb = NULL;
299 unsigned long flags; 363 unsigned long flags;
@@ -319,12 +383,69 @@ out:
319 return skb; 383 return skb;
320} 384}
321 385
386static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
387{
388 struct sk_buff *skb = NULL;
389 unsigned long flags;
390 int i, h, start_hlid;
391
392 /* start from the link after the last one */
393 start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
394
395 /* dequeue according to AC, round robin on each link */
396 for (i = 0; i < AP_MAX_LINKS; i++) {
397 h = (start_hlid + i) % AP_MAX_LINKS;
398
399 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
400 if (skb)
401 goto out;
402 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
403 if (skb)
404 goto out;
405 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
406 if (skb)
407 goto out;
408 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
409 if (skb)
410 goto out;
411 }
412
413out:
414 if (skb) {
415 wl->last_tx_hlid = h;
416 spin_lock_irqsave(&wl->wl_lock, flags);
417 wl->tx_queue_count--;
418 spin_unlock_irqrestore(&wl->wl_lock, flags);
419 } else {
420 wl->last_tx_hlid = 0;
421 }
422
423 return skb;
424}
425
426static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
427{
428 if (wl->bss_type == BSS_TYPE_AP_BSS)
429 return wl1271_ap_skb_dequeue(wl);
430
431 return wl1271_sta_skb_dequeue(wl);
432}
433
322static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) 434static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
323{ 435{
324 unsigned long flags; 436 unsigned long flags;
325 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 437 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
326 438
327 skb_queue_head(&wl->tx_queue[q], skb); 439 if (wl->bss_type == BSS_TYPE_AP_BSS) {
440 u8 hlid = wl1271_tx_get_hlid(skb);
441 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
442
443 /* make sure we dequeue the same packet next time */
444 wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
445 } else {
446 skb_queue_head(&wl->tx_queue[q], skb);
447 }
448
328 spin_lock_irqsave(&wl->wl_lock, flags); 449 spin_lock_irqsave(&wl->wl_lock, flags);
329 wl->tx_queue_count++; 450 wl->tx_queue_count++;
330 spin_unlock_irqrestore(&wl->wl_lock, flags); 451 spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -387,7 +508,7 @@ out_ack:
387 if (sent_packets) { 508 if (sent_packets) {
388 /* interrupt the firmware with the new packets */ 509 /* interrupt the firmware with the new packets */
389 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 510 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
390 handle_tx_low_watermark(wl); 511 wl1271_handle_tx_low_watermark(wl);
391 } 512 }
392 513
393out: 514out:
@@ -504,32 +625,76 @@ void wl1271_tx_complete(struct wl1271 *wl)
504 } 625 }
505} 626}
506 627
628void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
629{
630 struct sk_buff *skb;
631 int i, total = 0;
632 unsigned long flags;
633 struct ieee80211_tx_info *info;
634
635 for (i = 0; i < NUM_TX_QUEUES; i++) {
636 while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
637 wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
638 info = IEEE80211_SKB_CB(skb);
639 info->status.rates[0].idx = -1;
640 info->status.rates[0].count = 0;
641 ieee80211_tx_status(wl->hw, skb);
642 total++;
643 }
644 }
645
646 spin_lock_irqsave(&wl->wl_lock, flags);
647 wl->tx_queue_count -= total;
648 spin_unlock_irqrestore(&wl->wl_lock, flags);
649
650 wl1271_handle_tx_low_watermark(wl);
651}
652
507/* caller must hold wl->mutex */ 653/* caller must hold wl->mutex */
508void wl1271_tx_reset(struct wl1271 *wl) 654void wl1271_tx_reset(struct wl1271 *wl)
509{ 655{
510 int i; 656 int i;
511 struct sk_buff *skb; 657 struct sk_buff *skb;
658 struct ieee80211_tx_info *info;
512 659
513 /* TX failure */ 660 /* TX failure */
514 for (i = 0; i < NUM_TX_QUEUES; i++) { 661 if (wl->bss_type == BSS_TYPE_AP_BSS) {
515 while ((skb = skb_dequeue(&wl->tx_queue[i]))) { 662 for (i = 0; i < AP_MAX_LINKS; i++) {
516 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 663 wl1271_tx_reset_link_queues(wl, i);
517 ieee80211_tx_status(wl->hw, skb); 664 wl->links[i].allocated_blks = 0;
665 wl->links[i].prev_freed_blks = 0;
666 }
667
668 wl->last_tx_hlid = 0;
669 } else {
670 for (i = 0; i < NUM_TX_QUEUES; i++) {
671 while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
672 wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
673 skb);
674 info = IEEE80211_SKB_CB(skb);
675 info->status.rates[0].idx = -1;
676 info->status.rates[0].count = 0;
677 ieee80211_tx_status(wl->hw, skb);
678 }
518 } 679 }
519 } 680 }
681
520 wl->tx_queue_count = 0; 682 wl->tx_queue_count = 0;
521 683
522 /* 684 /*
523 * Make sure the driver is at a consistent state, in case this 685 * Make sure the driver is at a consistent state, in case this
524 * function is called from a context other than interface removal. 686 * function is called from a context other than interface removal.
525 */ 687 */
526 handle_tx_low_watermark(wl); 688 wl1271_handle_tx_low_watermark(wl);
527 689
528 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 690 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
529 if (wl->tx_frames[i] != NULL) { 691 if (wl->tx_frames[i] != NULL) {
530 skb = wl->tx_frames[i]; 692 skb = wl->tx_frames[i];
531 wl1271_free_tx_id(wl, i); 693 wl1271_free_tx_id(wl, i);
532 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 694 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
695 info = IEEE80211_SKB_CB(skb);
696 info->status.rates[0].idx = -1;
697 info->status.rates[0].count = 0;
533 ieee80211_tx_status(wl->hw, skb); 698 ieee80211_tx_status(wl->hw, skb);
534 } 699 }
535} 700}
@@ -544,8 +709,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
544 709
545 while (!time_after(jiffies, timeout)) { 710 while (!time_after(jiffies, timeout)) {
546 mutex_lock(&wl->mutex); 711 mutex_lock(&wl->mutex);
547 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d", 712 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
548 wl->tx_frames_cnt); 713 wl->tx_frames_cnt, wl->tx_queue_count);
549 if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) { 714 if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
550 mutex_unlock(&wl->mutex); 715 mutex_unlock(&wl->mutex);
551 return; 716 return;
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 05722a560d91..02f07fa66e82 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -53,8 +53,6 @@
53#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf 53#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf
54 54
55#define WL1271_TX_ALIGN_TO 4 55#define WL1271_TX_ALIGN_TO 4
56#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \
57 ~(WL1271_TX_ALIGN_TO - 1))
58#define WL1271_TKIP_IV_SPACE 4 56#define WL1271_TKIP_IV_SPACE 4
59 57
60struct wl1271_tx_hw_descr { 58struct wl1271_tx_hw_descr {
@@ -152,5 +150,8 @@ void wl1271_tx_flush(struct wl1271 *wl);
152u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 150u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
153u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 151u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
154u32 wl1271_tx_min_rate_get(struct wl1271 *wl); 152u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
153u8 wl1271_tx_get_hlid(struct sk_buff *skb);
154void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
155void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
155 156
156#endif 157#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1d6c94304b1a..338acc9f60b3 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -153,6 +153,17 @@ extern u32 wl12xx_debug_level;
153#define WL1271_AP_BROADCAST_HLID 1 153#define WL1271_AP_BROADCAST_HLID 1
154#define WL1271_AP_STA_HLID_START 2 154#define WL1271_AP_STA_HLID_START 2
155 155
156/*
157 * When in AP-mode, we allow (at least) this number of mem-blocks
158 * to be transmitted to FW for a STA in PS-mode. Only when packets are
159 * present in the FW buffers it will wake the sleeping STA. We want to put
160 * enough packets for the driver to transmit all of its buffered data before
161 * the STA goes to sleep again. But we don't want to take too much mem-blocks
162 * as it might hurt the throughput of active STAs.
163 * The number of blocks (18) is enough for 2 large packets.
164 */
165#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
166
156#define WL1271_AP_BSS_INDEX 0 167#define WL1271_AP_BSS_INDEX 0
157#define WL1271_AP_DEF_INACTIV_SEC 300 168#define WL1271_AP_DEF_INACTIV_SEC 300
158#define WL1271_AP_DEF_BEACON_EXP 20 169#define WL1271_AP_DEF_BEACON_EXP 20
@@ -319,6 +330,17 @@ enum wl12xx_flags {
319 WL1271_FLAG_AP_STARTED 330 WL1271_FLAG_AP_STARTED
320}; 331};
321 332
333struct wl1271_link {
334 /* AP-mode - TX queue per AC in link */
335 struct sk_buff_head tx_queue[NUM_TX_QUEUES];
336
337 /* accounting for allocated / available TX blocks in FW */
338 u8 allocated_blks;
339 u8 prev_freed_blks;
340
341 u8 addr[ETH_ALEN];
342};
343
322struct wl1271 { 344struct wl1271 {
323 struct platform_device *plat_dev; 345 struct platform_device *plat_dev;
324 struct ieee80211_hw *hw; 346 struct ieee80211_hw *hw;
@@ -498,6 +520,21 @@ struct wl1271 {
498 /* RX BA constraint value */ 520 /* RX BA constraint value */
499 bool ba_support; 521 bool ba_support;
500 u8 ba_rx_bitmap; 522 u8 ba_rx_bitmap;
523
524 /*
525 * AP-mode - links indexed by HLID. The global and broadcast links
526 * are always active.
527 */
528 struct wl1271_link links[AP_MAX_LINKS];
529
530 /* the hlid of the link where the last transmitted skb came from */
531 int last_tx_hlid;
532
533 /* AP-mode - a bitmap of links currently in PS mode according to FW */
534 u32 ap_fw_ps_map;
535
536 /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
537 unsigned long ap_ps_map;
501}; 538};
502 539
503struct wl1271_station { 540struct wl1271_station {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 74a269ebbeb9..5037c8b2b415 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -850,7 +850,7 @@ static int fill_ctrlset(struct zd_mac *mac,
850 * control block of the skbuff will be initialized. If necessary the incoming 850 * control block of the skbuff will be initialized. If necessary the incoming
851 * mac80211 queues will be stopped. 851 * mac80211 queues will be stopped.
852 */ 852 */
853static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 853static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
854{ 854{
855 struct zd_mac *mac = zd_hw_mac(hw); 855 struct zd_mac *mac = zd_hw_mac(hw);
856 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 856 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -865,11 +865,10 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
865 r = zd_usb_tx(&mac->chip.usb, skb); 865 r = zd_usb_tx(&mac->chip.usb, skb);
866 if (r) 866 if (r)
867 goto fail; 867 goto fail;
868 return 0; 868 return;
869 869
870fail: 870fail:
871 dev_kfree_skb(skb); 871 dev_kfree_skb(skb);
872 return 0;
873} 872}
874 873
875/** 874/**
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd492754..ea1580085347 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig NFC_DEVICES 5menuconfig NFC_DEVICES
6 bool "NFC devices" 6 bool "Near Field Communication (NFC) devices"
7 default n 7 default n
8 ---help--- 8 ---help---
9 You'll have to say Y if your computer contains an NFC device that 9 You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae647264dd6..724f65d8f9e4 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
60struct pn544_info { 60struct pn544_info {
61 struct miscdevice miscdev; 61 struct miscdevice miscdev;
62 struct i2c_client *i2c_dev; 62 struct i2c_client *i2c_dev;
63 struct regulator_bulk_data regs[2]; 63 struct regulator_bulk_data regs[3];
64 64
65 enum pn544_state state; 65 enum pn544_state state;
66 wait_queue_head_t read_wait; 66 wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
74 74
75static const char reg_vdd_io[] = "Vdd_IO"; 75static const char reg_vdd_io[] = "Vdd_IO";
76static const char reg_vbat[] = "VBat"; 76static const char reg_vbat[] = "VBat";
77static const char reg_vsim[] = "VSim";
77 78
78/* sysfs interface */ 79/* sysfs interface */
79static ssize_t pn544_test(struct device *dev, 80static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
740 741
741 info->regs[0].supply = reg_vdd_io; 742 info->regs[0].supply = reg_vdd_io;
742 info->regs[1].supply = reg_vbat; 743 info->regs[1].supply = reg_vbat;
744 info->regs[2].supply = reg_vsim;
743 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), 745 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
744 info->regs); 746 info->regs);
745 if (r < 0) 747 if (r < 0)
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0a50f6..4d87b5dc9284 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
36 (p)->unique_id = of_pdt_unique_id++; \ 36 (p)->unique_id = of_pdt_unique_id++; \
37} while (0) 37} while (0)
38 38
39static inline const char *of_pdt_node_name(struct device_node *dp) 39static char * __init of_pdt_build_full_name(struct device_node *dp)
40{ 40{
41 return dp->path_component_name; 41 int len, ourlen, plen;
42 char *n;
43
44 dp->path_component_name = build_path_component(dp);
45
46 plen = strlen(dp->parent->full_name);
47 ourlen = strlen(dp->path_component_name);
48 len = ourlen + plen + 2;
49
50 n = prom_early_alloc(len);
51 strcpy(n, dp->parent->full_name);
52 if (!of_node_is_root(dp->parent)) {
53 strcpy(n + plen, "/");
54 plen++;
55 }
56 strcpy(n + plen, dp->path_component_name);
57
58 return n;
42} 59}
43 60
44#else 61#else /* CONFIG_SPARC */
45 62
46static inline void of_pdt_incr_unique_id(void *p) { } 63static inline void of_pdt_incr_unique_id(void *p) { }
47static inline void irq_trans_init(struct device_node *dp) { } 64static inline void irq_trans_init(struct device_node *dp) { }
48 65
49static inline const char *of_pdt_node_name(struct device_node *dp) 66static char * __init of_pdt_build_full_name(struct device_node *dp)
50{ 67{
51 return dp->name; 68 static int failsafe_id = 0; /* for generating unique names on failure */
69 char *buf;
70 int len;
71
72 if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
73 goto failsafe;
74
75 buf = prom_early_alloc(len + 1);
76 if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
77 goto failsafe;
78 return buf;
79
80 failsafe:
81 buf = prom_early_alloc(strlen(dp->parent->full_name) +
82 strlen(dp->name) + 16);
83 sprintf(buf, "%s/%s@unknown%i",
84 of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
85 dp->name, failsafe_id++);
86 pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
87 return buf;
52} 88}
53 89
54#endif /* !CONFIG_SPARC */ 90#endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
132 return buf; 168 return buf;
133} 169}
134 170
135static char * __init of_pdt_try_pkg2path(phandle node)
136{
137 char *res, *buf = NULL;
138 int len;
139
140 if (!of_pdt_prom_ops->pkg2path)
141 return NULL;
142
143 if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
144 return NULL;
145 buf = prom_early_alloc(len + 1);
146 if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
147 pr_err("%s: package-to-path failed\n", __func__);
148 return NULL;
149 }
150
151 res = strrchr(buf, '/');
152 if (!res) {
153 pr_err("%s: couldn't find / in %s\n", __func__, buf);
154 return NULL;
155 }
156 return res+1;
157}
158
159/*
160 * When fetching the node's name, first try using package-to-path; if
161 * that fails (either because the arch hasn't supplied a PROM callback,
162 * or some other random failure), fall back to just looking at the node's
163 * 'name' property.
164 */
165static char * __init of_pdt_build_name(phandle node)
166{
167 char *buf;
168
169 buf = of_pdt_try_pkg2path(node);
170 if (!buf)
171 buf = of_pdt_get_one_property(node, "name");
172
173 return buf;
174}
175
176static struct device_node * __init of_pdt_create_node(phandle node, 171static struct device_node * __init of_pdt_create_node(phandle node,
177 struct device_node *parent) 172 struct device_node *parent)
178{ 173{
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
187 182
188 kref_init(&dp->kref); 183 kref_init(&dp->kref);
189 184
190 dp->name = of_pdt_build_name(node); 185 dp->name = of_pdt_get_one_property(node, "name");
191 dp->type = of_pdt_get_one_property(node, "device_type"); 186 dp->type = of_pdt_get_one_property(node, "device_type");
192 dp->phandle = node; 187 dp->phandle = node;
193 188
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
198 return dp; 193 return dp;
199} 194}
200 195
201static char * __init of_pdt_build_full_name(struct device_node *dp)
202{
203 int len, ourlen, plen;
204 char *n;
205
206 plen = strlen(dp->parent->full_name);
207 ourlen = strlen(of_pdt_node_name(dp));
208 len = ourlen + plen + 2;
209
210 n = prom_early_alloc(len);
211 strcpy(n, dp->parent->full_name);
212 if (!of_node_is_root(dp->parent)) {
213 strcpy(n + plen, "/");
214 plen++;
215 }
216 strcpy(n + plen, of_pdt_node_name(dp));
217
218 return n;
219}
220
221static struct device_node * __init of_pdt_build_tree(struct device_node *parent, 196static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
222 phandle node, 197 phandle node,
223 struct device_node ***nextp) 198 struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
240 *(*nextp) = dp; 215 *(*nextp) = dp;
241 *nextp = &dp->allnext; 216 *nextp = &dp->allnext;
242 217
243#if defined(CONFIG_SPARC)
244 dp->path_component_name = build_path_component(dp);
245#endif
246 dp->full_name = of_pdt_build_full_name(dp); 218 dp->full_name = of_pdt_build_full_name(dp);
247 219
248 dp->child = of_pdt_build_tree(dp, 220 dp->child = of_pdt_build_tree(dp,
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b3ed55..42fbf1a75576 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
518 flags |= CONF_ENABLE_IOCARD; 518 flags |= CONF_ENABLE_IOCARD;
519 if (flags & CONF_ENABLE_IOCARD) 519 if (flags & CONF_ENABLE_IOCARD)
520 s->socket.flags |= SS_IOCARD; 520 s->socket.flags |= SS_IOCARD;
521 if (flags & CONF_ENABLE_ZVCARD)
522 s->socket.flags |= SS_ZVCARD | SS_IOCARD;
521 if (flags & CONF_ENABLE_SPKR) { 523 if (flags & CONF_ENABLE_SPKR) {
522 s->socket.flags |= SS_SPKR_ENA; 524 s->socket.flags |= SS_SPKR_ENA;
523 status = CCSR_AUDIO_ENA; 525 status = CCSR_AUDIO_ENA;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c8c715..2c540542b5af 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
215} 215}
216#endif 216#endif
217 217
218static void pxa2xx_configure_sockets(struct device *dev) 218void pxa2xx_configure_sockets(struct device *dev)
219{ 219{
220 struct pcmcia_low_level *ops = dev->platform_data; 220 struct pcmcia_low_level *ops = dev->platform_data;
221 /* 221 /*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea87b8f9..b609b45469ed 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
1int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); 1int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
2void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); 2void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
3void pxa2xx_configure_sockets(struct device *dev);
3 4
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8fb42bd..25afe637c657 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
226 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); 226 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
227 227
228 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); 228 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
229 pxa2xx_configure_sockets(&sadev->dev);
229 ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, 230 ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
230 pxa2xx_drv_pcmcia_add_one); 231 pxa2xx_drv_pcmcia_add_one);
231 } 232 }
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d163bc2e2b9e..a59af5b24f0a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -227,7 +227,7 @@ config SONYPI_COMPAT
227config IDEAPAD_LAPTOP 227config IDEAPAD_LAPTOP
228 tristate "Lenovo IdeaPad Laptop Extras" 228 tristate "Lenovo IdeaPad Laptop Extras"
229 depends on ACPI 229 depends on ACPI
230 depends on RFKILL 230 depends on RFKILL && INPUT
231 select INPUT_SPARSEKMAP 231 select INPUT_SPARSEKMAP
232 help 232 help
233 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. 233 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c5c4b8c32eb8..38b34a73866a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL");
84 */ 84 */
85#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" 85#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
86#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" 86#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
87#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" 87#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
88#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" 88#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
89#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" 89#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
90 90
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
1280 return -EINVAL; 1280 return -EINVAL;
1281 return count; 1281 return count;
1282} 1282}
1283static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, 1283static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
1284 set_bool_threeg); 1284 set_bool_threeg);
1285 1285
1286static ssize_t show_interface(struct device *dev, struct device_attribute *attr, 1286static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 4633fd8532cc..fe495939c307 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
1081 struct proc_dir_entry *proc; 1081 struct proc_dir_entry *proc;
1082 mode_t mode; 1082 mode_t mode;
1083 1083
1084 /*
1085 * If parameter uid or gid is not changed, keep the default setting for
1086 * our proc entries (-rw-rw-rw-) else, it means we care about security,
1087 * and then set to -rw-rw----
1088 */
1089
1090 if ((asus_uid == 0) && (asus_gid == 0)) { 1084 if ((asus_uid == 0) && (asus_gid == 0)) {
1091 mode = S_IFREG | S_IRUGO | S_IWUGO; 1085 mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
1092 } else { 1086 } else {
1093 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; 1087 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
1094 printk(KERN_WARNING " asus_uid and asus_gid parameters are " 1088 printk(KERN_WARNING " asus_uid and asus_gid parameters are "
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 34657f96b5a5..ad24ef36f9f7 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
290 dell_send_request(buffer, 17, 11); 290 dell_send_request(buffer, 17, 11);
291 291
292 /* If the hardware switch controls this radio, and the hardware 292 /* If the hardware switch controls this radio, and the hardware
293 switch is disabled, don't allow changing the software state */ 293 switch is disabled, don't allow changing the software state.
294 If the hardware switch is reported as not supported, always
295 fire the SMI to toggle the killswitch. */
294 if ((hwswitch_state & BIT(hwswitch_bit)) && 296 if ((hwswitch_state & BIT(hwswitch_bit)) &&
295 !(buffer->output[1] & BIT(16))) { 297 !(buffer->output[1] & BIT(16)) &&
298 (buffer->output[1] & BIT(0))) {
296 ret = -EINVAL; 299 ret = -EINVAL;
297 goto out; 300 goto out;
298 } 301 }
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
398 401
399static void dell_update_rfkill(struct work_struct *ignored) 402static void dell_update_rfkill(struct work_struct *ignored)
400{ 403{
404 int status;
405
406 get_buffer();
407 dell_send_request(buffer, 17, 11);
408 status = buffer->output[1];
409 release_buffer();
410
411 /* if hardware rfkill is not supported, set it explicitly */
412 if (!(status & BIT(0))) {
413 if (wifi_rfkill)
414 dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
415 if (bluetooth_rfkill)
416 dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
417 if (wwan_rfkill)
418 dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
419 }
420
401 if (wifi_rfkill) 421 if (wifi_rfkill)
402 dell_rfkill_query(wifi_rfkill, (void *)1); 422 dell_rfkill_query(wifi_rfkill, (void *)1);
403 if (bluetooth_rfkill) 423 if (bluetooth_rfkill)
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 930e62762365..61433d492862 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
60#define GPOSW_DOU 0x08 60#define GPOSW_DOU 0x08
61#define GPOSW_RDRV 0x30 61#define GPOSW_RDRV 0x30
62 62
63#define GPIO_UPDATE_TYPE 0x80000000
63 64
64#define NUM_GPIO 24 65#define NUM_GPIO 24
65 66
66struct pmic_gpio_irq {
67 spinlock_t lock;
68 u32 trigger[NUM_GPIO];
69 u32 dirty;
70 struct work_struct work;
71};
72
73
74struct pmic_gpio { 67struct pmic_gpio {
68 struct mutex buslock;
75 struct gpio_chip chip; 69 struct gpio_chip chip;
76 struct pmic_gpio_irq irqtypes;
77 void *gpiointr; 70 void *gpiointr;
78 int irq; 71 int irq;
79 unsigned irq_base; 72 unsigned irq_base;
73 unsigned int update_type;
74 u32 trigger_type;
80}; 75};
81 76
82static void pmic_program_irqtype(int gpio, int type)
83{
84 if (type & IRQ_TYPE_EDGE_RISING)
85 intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
86 else
87 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
88
89 if (type & IRQ_TYPE_EDGE_FALLING)
90 intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
91 else
92 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
93};
94
95static void pmic_irqtype_work(struct work_struct *work)
96{
97 struct pmic_gpio_irq *t =
98 container_of(work, struct pmic_gpio_irq, work);
99 unsigned long flags;
100 int i;
101 u16 type;
102
103 spin_lock_irqsave(&t->lock, flags);
104 /* As we drop the lock, we may need multiple scans if we race the
105 pmic_irq_type function */
106 while (t->dirty) {
107 /*
108 * For each pin that has the dirty bit set send an IPC
109 * message to configure the hardware via the PMIC
110 */
111 for (i = 0; i < NUM_GPIO; i++) {
112 if (!(t->dirty & (1 << i)))
113 continue;
114 t->dirty &= ~(1 << i);
115 /* We can't trust the array entry or dirty
116 once the lock is dropped */
117 type = t->trigger[i];
118 spin_unlock_irqrestore(&t->lock, flags);
119 pmic_program_irqtype(i, type);
120 spin_lock_irqsave(&t->lock, flags);
121 }
122 }
123 spin_unlock_irqrestore(&t->lock, flags);
124}
125
126static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 77static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
127{ 78{
128 if (offset > 8) { 79 if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
190 1 << (offset - 16)); 141 1 << (offset - 16));
191} 142}
192 143
193static int pmic_irq_type(unsigned irq, unsigned type) 144/*
145 * This is called from genirq with pg->buslock locked and
146 * irq_desc->lock held. We can not access the scu bus here, so we
147 * store the change and update in the bus_sync_unlock() function below
148 */
149static int pmic_irq_type(struct irq_data *data, unsigned type)
194{ 150{
195 struct pmic_gpio *pg = get_irq_chip_data(irq); 151 struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
196 u32 gpio = irq - pg->irq_base; 152 u32 gpio = data->irq - pg->irq_base;
197 unsigned long flags;
198 153
199 if (gpio >= pg->chip.ngpio) 154 if (gpio >= pg->chip.ngpio)
200 return -EINVAL; 155 return -EINVAL;
201 156
202 spin_lock_irqsave(&pg->irqtypes.lock, flags); 157 pg->trigger_type = type;
203 pg->irqtypes.trigger[gpio] = type; 158 pg->update_type = gpio | GPIO_UPDATE_TYPE;
204 pg->irqtypes.dirty |= (1 << gpio);
205 spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
206 schedule_work(&pg->irqtypes.work);
207 return 0; 159 return 0;
208} 160}
209 161
210
211
212static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 162static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
213{ 163{
214 struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); 164 struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
217} 167}
218 168
219/* the gpiointr register is read-clear, so just do nothing. */ 169/* the gpiointr register is read-clear, so just do nothing. */
220static void pmic_irq_unmask(unsigned irq) 170static void pmic_irq_unmask(struct irq_data *data) { }
221{
222};
223 171
224static void pmic_irq_mask(unsigned irq) 172static void pmic_irq_mask(struct irq_data *data) { }
225{
226};
227 173
228static struct irq_chip pmic_irqchip = { 174static struct irq_chip pmic_irqchip = {
229 .name = "PMIC-GPIO", 175 .name = "PMIC-GPIO",
230 .mask = pmic_irq_mask, 176 .irq_mask = pmic_irq_mask,
231 .unmask = pmic_irq_unmask, 177 .irq_unmask = pmic_irq_unmask,
232 .set_type = pmic_irq_type, 178 .irq_set_type = pmic_irq_type,
233}; 179};
234 180
235static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) 181static irqreturn_t pmic_irq_handler(int irq, void *data)
236{ 182{
237 struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); 183 struct pmic_gpio *pg = data;
238 u8 intsts = *((u8 *)pg->gpiointr + 4); 184 u8 intsts = *((u8 *)pg->gpiointr + 4);
239 int gpio; 185 int gpio;
186 irqreturn_t ret = IRQ_NONE;
240 187
241 for (gpio = 0; gpio < 8; gpio++) { 188 for (gpio = 0; gpio < 8; gpio++) {
242 if (intsts & (1 << gpio)) { 189 if (intsts & (1 << gpio)) {
243 pr_debug("pmic pin %d triggered\n", gpio); 190 pr_debug("pmic pin %d triggered\n", gpio);
244 generic_handle_irq(pg->irq_base + gpio); 191 generic_handle_irq(pg->irq_base + gpio);
192 ret = IRQ_HANDLED;
245 } 193 }
246 } 194 }
247 195 return ret;
248 if (desc->chip->irq_eoi)
249 desc->chip->irq_eoi(irq_get_irq_data(irq));
250 else
251 dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
252} 196}
253 197
254static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) 198static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
297 pg->chip.can_sleep = 1; 241 pg->chip.can_sleep = 1;
298 pg->chip.dev = dev; 242 pg->chip.dev = dev;
299 243
300 INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); 244 mutex_init(&pg->buslock);
301 spin_lock_init(&pg->irqtypes.lock);
302 245
303 pg->chip.dev = dev; 246 pg->chip.dev = dev;
304 retval = gpiochip_add(&pg->chip); 247 retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
306 printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); 249 printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
307 goto err; 250 goto err;
308 } 251 }
309 set_irq_data(pg->irq, pg); 252
310 set_irq_chained_handler(pg->irq, pmic_irq_handler); 253 retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
254 if (retval) {
255 printk(KERN_WARNING "pmic: Interrupt request failed\n");
256 goto err;
257 }
258
311 for (i = 0; i < 8; i++) { 259 for (i = 0; i < 8; i++) {
312 set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, 260 set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
313 handle_simple_irq, "demux"); 261 handle_simple_irq, "demux");
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1feff71..865ef78d6f1a 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
162 return -EINVAL; \ 162 return -EINVAL; \
163 return count; \ 163 return count; \
164} \ 164} \
165static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ 165static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
166 show_bool_##value, set_bool_##value); 166 show_bool_##value, set_bool_##value);
167 167
168show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); 168show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd599585c6a9..eb9922385ef8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
2275 if (keycode != KEY_RESERVED) { 2275 if (keycode != KEY_RESERVED) {
2276 mutex_lock(&tpacpi_inputdev_send_mutex); 2276 mutex_lock(&tpacpi_inputdev_send_mutex);
2277 2277
2278 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
2278 input_report_key(tpacpi_inputdev, keycode, 1); 2279 input_report_key(tpacpi_inputdev, keycode, 1);
2279 if (keycode == KEY_UNKNOWN)
2280 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
2281 scancode);
2282 input_sync(tpacpi_inputdev); 2280 input_sync(tpacpi_inputdev);
2283 2281
2282 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
2284 input_report_key(tpacpi_inputdev, keycode, 0); 2283 input_report_key(tpacpi_inputdev, keycode, 0);
2285 if (keycode == KEY_UNKNOWN)
2286 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
2287 scancode);
2288 input_sync(tpacpi_inputdev); 2284 input_sync(tpacpi_inputdev);
2289 2285
2290 mutex_unlock(&tpacpi_inputdev_send_mutex); 2286 mutex_unlock(&tpacpi_inputdev_send_mutex);
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43f7519..a4e8eb9fece6 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
168{ 168{
169 unsigned long flags; 169 unsigned long flags;
170 int captured = 0; 170 int captured = 0;
171 struct pps_ktime ts_real; 171 struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
172 172
173 /* check event type */ 173 /* check event type */
174 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); 174 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b41853a877..1269fbd2deca 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
77 77
78 /* Several chips lock up trying to read undefined config space */ 78 /* Several chips lock up trying to read undefined config space */
79 if (capable(CAP_SYS_ADMIN)) 79 if (capable(CAP_SYS_ADMIN))
80 size = 0x200000; 80 size = RIO_MAINT_SPACE_SZ;
81 81
82 if (off > size) 82 if (off >= size)
83 return 0; 83 return 0;
84 if (off + count > size) { 84 if (off + count > size) {
85 size -= off; 85 size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
147 loff_t init_off = off; 147 loff_t init_off = off;
148 u8 *data = (u8 *) buf; 148 u8 *data = (u8 *) buf;
149 149
150 if (off > 0x200000) 150 if (off >= RIO_MAINT_SPACE_SZ)
151 return 0; 151 return 0;
152 if (off + count > 0x200000) { 152 if (off + count > RIO_MAINT_SPACE_SZ) {
153 size = 0x200000 - off; 153 size = RIO_MAINT_SPACE_SZ - off;
154 count = size; 154 count = size;
155 } 155 }
156 156
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
200 .name = "config", 200 .name = "config",
201 .mode = S_IRUGO | S_IWUSR, 201 .mode = S_IRUGO | S_IWUSR,
202 }, 202 },
203 .size = 0x200000, 203 .size = RIO_MAINT_SPACE_SZ,
204 .read = rio_read_config, 204 .read = rio_read_config,
205 .write = rio_write_config, 205 .write = rio_write_config,
206}; 206};
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b950d4..2bb5de1f2421 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
174 174
175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); 175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
176 176
177 BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); 177 BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
178 178
179 return mc13xxx_regulators[id].voltages[val]; 179 return mc13xxx_regulators[id].voltages[val];
180} 180}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4bde91..06df898842c0 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
120 return REGULATOR_MODE_IDLE; 120 return REGULATOR_MODE_IDLE;
121 default: 121 default:
122 BUG(); 122 BUG();
123 return -EINVAL;
123 } 124 }
124} 125}
125 126
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e4c926..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
309 .read_alarm = at91_rtc_readalarm, 309 .read_alarm = at91_rtc_readalarm,
310 .set_alarm = at91_rtc_setalarm, 310 .set_alarm = at91_rtc_setalarm,
311 .proc = at91_rtc_proc, 311 .proc = at91_rtc_proc,
312 .alarm_irq_enabled = at91_rtc_alarm_irq_enable, 312 .alarm_irq_enable = at91_rtc_alarm_irq_enable,
313}; 313};
314 314
315/* 315/*
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee19764c..950735415a7c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C 2 * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
3 * 3 *
4 * Copyright (C) 2009-2010 Freescale Semiconductor. 4 * Copyright (C) 2009-2011 Freescale Semiconductor.
5 * Author: Jack Lan <jack.lan@freescale.com> 5 * Author: Jack Lan <jack.lan@freescale.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
141 time->tm_hour = bcd2bin(hour); 141 time->tm_hour = bcd2bin(hour);
142 } 142 }
143 143
144 time->tm_wday = bcd2bin(week); 144 /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
145 time->tm_wday = bcd2bin(week) - 1;
145 time->tm_mday = bcd2bin(day); 146 time->tm_mday = bcd2bin(day);
146 time->tm_mon = bcd2bin(month & 0x7F); 147 /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
148 time->tm_mon = bcd2bin(month & 0x7F) - 1;
147 if (century) 149 if (century)
148 add_century = 100; 150 add_century = 100;
149 151
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
162 buf[0] = bin2bcd(time->tm_sec); 164 buf[0] = bin2bcd(time->tm_sec);
163 buf[1] = bin2bcd(time->tm_min); 165 buf[1] = bin2bcd(time->tm_min);
164 buf[2] = bin2bcd(time->tm_hour); 166 buf[2] = bin2bcd(time->tm_hour);
165 buf[3] = bin2bcd(time->tm_wday); /* Day of the week */ 167 /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
168 buf[3] = bin2bcd(time->tm_wday + 1);
166 buf[4] = bin2bcd(time->tm_mday); /* Date */ 169 buf[4] = bin2bcd(time->tm_mday); /* Date */
167 buf[5] = bin2bcd(time->tm_mon); 170 /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
171 buf[5] = bin2bcd(time->tm_mon + 1);
168 if (time->tm_year >= 100) { 172 if (time->tm_year >= 100) {
169 buf[5] |= 0x80; 173 buf[5] |= 0x80;
170 buf[6] = bin2bcd(time->tm_year - 100); 174 buf[6] = bin2bcd(time->tm_year - 100);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d05563..a9fe23d5bd0f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
72static struct ccw_device_id dasd_eckd_ids[] = { 72static struct ccw_device_id dasd_eckd_ids[] = {
73 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, 73 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
74 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, 74 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
75 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, 75 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
76 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, 76 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
77 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, 77 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
78 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, 78 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index 5cf4e9831f1b..11dff23f7838 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -1,6 +1,8 @@
1config SCSI_CXGB3_ISCSI 1config SCSI_CXGB3_ISCSI
2 tristate "Chelsio T3 iSCSI support" 2 tristate "Chelsio T3 iSCSI support"
3 depends on CHELSIO_T3_DEPENDS 3 depends on PCI && INET
4 select NETDEVICES
5 select NETDEV_10000
4 select CHELSIO_T3 6 select CHELSIO_T3
5 select SCSI_ISCSI_ATTRS 7 select SCSI_ISCSI_ATTRS
6 ---help--- 8 ---help---
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index bb94b39b17b3..d5302c27f377 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -1,6 +1,8 @@
1config SCSI_CXGB4_ISCSI 1config SCSI_CXGB4_ISCSI
2 tristate "Chelsio T4 iSCSI support" 2 tristate "Chelsio T4 iSCSI support"
3 depends on CHELSIO_T4_DEPENDS 3 depends on PCI && INET
4 select NETDEVICES
5 select NETDEV_10000
4 select CHELSIO_T4 6 select CHELSIO_T4
5 select SCSI_ISCSI_ATTRS 7 select SCSI_ISCSI_ATTRS
6 ---help--- 8 ---help---
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d2ad3d676724..889199aa1f5b 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -470,7 +470,8 @@ static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
470 } 470 }
471 }; 471 };
472 472
473 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) 473 rt = ip_route_output_flow(&init_net, &fl, NULL);
474 if (IS_ERR(rt))
474 return NULL; 475 return NULL;
475 476
476 return rt; 477 return rt;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd25..fb2bb35c62cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
443 &sdev->request_queue->queue_flags); 443 &sdev->request_queue->queue_flags);
444 if (flagset) 444 if (flagset)
445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446 __blk_run_queue(sdev->request_queue); 446 __blk_run_queue(sdev->request_queue, false);
447 if (flagset) 447 if (flagset)
448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449 spin_unlock(sdev->request_queue->queue_lock); 449 spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01be3234..5c3ccfc6b622 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3830 if (flagset) 3830 if (flagset)
3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); 3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
3832 __blk_run_queue(rport->rqst_q); 3832 __blk_run_queue(rport->rqst_q, false);
3833 if (flagset) 3833 if (flagset)
3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); 3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index cd8392badff0..6363077468f1 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -104,9 +104,6 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
104static void wl_release_fw(struct wl_info *wl); 104static void wl_release_fw(struct wl_info *wl);
105 105
106/* local prototypes */ 106/* local prototypes */
107static int wl_start(struct sk_buff *skb, struct wl_info *wl);
108static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw,
109 struct sk_buff *skb);
110static void wl_dpc(unsigned long data); 107static void wl_dpc(unsigned long data);
111 108
112MODULE_AUTHOR("Broadcom Corporation"); 109MODULE_AUTHOR("Broadcom Corporation");
@@ -135,7 +132,6 @@ module_param(phymsglevel, int, 0);
135 132
136#define HW_TO_WL(hw) (hw->priv) 133#define HW_TO_WL(hw) (hw->priv)
137#define WL_TO_HW(wl) (wl->pub->ieee_hw) 134#define WL_TO_HW(wl) (wl->pub->ieee_hw)
138static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
139static int wl_ops_start(struct ieee80211_hw *hw); 135static int wl_ops_start(struct ieee80211_hw *hw);
140static void wl_ops_stop(struct ieee80211_hw *hw); 136static void wl_ops_stop(struct ieee80211_hw *hw);
141static int wl_ops_add_interface(struct ieee80211_hw *hw, 137static int wl_ops_add_interface(struct ieee80211_hw *hw,
@@ -173,20 +169,18 @@ static int wl_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
173 enum ieee80211_ampdu_mlme_action action, 169 enum ieee80211_ampdu_mlme_action action,
174 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 170 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
175 171
176static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 172static void wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
177{ 173{
178 int status;
179 struct wl_info *wl = hw->priv; 174 struct wl_info *wl = hw->priv;
180 WL_LOCK(wl); 175 WL_LOCK(wl);
181 if (!wl->pub->up) { 176 if (!wl->pub->up) {
182 WL_ERROR("ops->tx called while down\n"); 177 WL_ERROR("ops->tx called while down\n");
183 status = -ENETDOWN; 178 kfree_skb(skb);
184 goto done; 179 goto done;
185 } 180 }
186 status = wl_start(skb, wl); 181 wlc_sendpkt_mac80211(wl->wlc, skb, hw);
187 done: 182 done:
188 WL_UNLOCK(wl); 183 WL_UNLOCK(wl);
189 return status;
190} 184}
191 185
192static int wl_ops_start(struct ieee80211_hw *hw) 186static int wl_ops_start(struct ieee80211_hw *hw)
@@ -1325,22 +1319,6 @@ void wl_free(struct wl_info *wl)
1325 osl_detach(osh); 1319 osl_detach(osh);
1326} 1320}
1327 1321
1328/* transmit a packet */
1329static int BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl)
1330{
1331 if (!wl)
1332 return -ENETDOWN;
1333
1334 return wl_start_int(wl, WL_TO_HW(wl), skb);
1335}
1336
1337static int BCMFASTPATH
1338wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
1339{
1340 wlc_sendpkt_mac80211(wl->wlc, skb, hw);
1341 return NETDEV_TX_OK;
1342}
1343
1344void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state, 1322void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
1345 int prio) 1323 int prio)
1346{ 1324{
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index e37e8058e2b8..aa12d1a65184 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -6818,11 +6818,14 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
6818 ratespec_t rspec; 6818 ratespec_t rspec;
6819 unsigned char *plcp; 6819 unsigned char *plcp;
6820 6820
6821#if 0
6822 /* Clearly, this is bogus -- reading the TSF now is wrong */
6821 wlc_read_tsf(wlc, &tsf_l, &tsf_h); /* mactime */ 6823 wlc_read_tsf(wlc, &tsf_l, &tsf_h); /* mactime */
6822 rx_status->mactime = tsf_h; 6824 rx_status->mactime = tsf_h;
6823 rx_status->mactime <<= 32; 6825 rx_status->mactime <<= 32;
6824 rx_status->mactime |= tsf_l; 6826 rx_status->mactime |= tsf_l;
6825 rx_status->flag |= RX_FLAG_TSFT; 6827 rx_status->flag |= RX_FLAG_MACTIME_MPDU; /* clearly wrong */
6828#endif
6826 6829
6827 channel = WLC_CHAN_CHANNEL(rxh->RxChan); 6830 channel = WLC_CHAN_CHANNEL(rxh->RxChan);
6828 6831
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
index 89279ba1b737..39413b7d387d 100644
--- a/drivers/staging/pohmelfs/config.c
+++ b/drivers/staging/pohmelfs/config.c
@@ -525,7 +525,7 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n
525{ 525{
526 int err; 526 int err;
527 527
528 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) 528 if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
529 return; 529 return;
530 530
531 switch (msg->flags) { 531 switch (msg->flags) {
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 2163d60c2eaf..3724e1e67ec2 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -118,13 +118,14 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
118 *total_flags = new_flags; 118 *total_flags = new_flags;
119} 119}
120 120
121static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 121static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
122{ 122{
123 struct wbsoft_priv *priv = dev->priv; 123 struct wbsoft_priv *priv = dev->priv;
124 124
125 if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) { 125 if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) {
126 priv->sMlmeFrame.wNumTxMMPDUDiscarded++; 126 priv->sMlmeFrame.wNumTxMMPDUDiscarded++;
127 return NETDEV_TX_BUSY; 127 kfree_skb(skb);
128 return;
128 } 129 }
129 130
130 priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME; 131 priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME;
@@ -140,8 +141,6 @@ static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
140 */ 141 */
141 142
142 Mds_Tx(priv); 143 Mds_Tx(priv);
143
144 return NETDEV_TX_OK;
145} 144}
146 145
147static int wbsoft_start(struct ieee80211_hw *dev) 146static int wbsoft_start(struct ieee80211_hw *dev)
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba3ca23..bf7c687519ef 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig THERMAL 5menuconfig THERMAL
6 tristate "Generic Thermal sysfs driver" 6 tristate "Generic Thermal sysfs driver"
7 depends on NET
8 help 7 help
9 Generic Thermal Sysfs driver offers a generic mechanism for 8 Generic Thermal Sysfs driver offers a generic mechanism for
10 thermal management. Usually it's made up of one or more thermal 9 thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c79280..713b7ea4a607 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
62 62
63static unsigned int thermal_event_seqnum; 63static unsigned int thermal_event_seqnum;
64 64
65static struct genl_family thermal_event_genl_family = {
66 .id = GENL_ID_GENERATE,
67 .name = THERMAL_GENL_FAMILY_NAME,
68 .version = THERMAL_GENL_VERSION,
69 .maxattr = THERMAL_GENL_ATTR_MAX,
70};
71
72static struct genl_multicast_group thermal_event_mcgrp = {
73 .name = THERMAL_GENL_MCAST_GROUP_NAME,
74};
75
76static int genetlink_init(void);
77static void genetlink_exit(void);
78
79static int get_idr(struct idr *idr, struct mutex *lock, int *id) 65static int get_idr(struct idr *idr, struct mutex *lock, int *id)
80{ 66{
81 int err; 67 int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1225 1211
1226EXPORT_SYMBOL(thermal_zone_device_unregister); 1212EXPORT_SYMBOL(thermal_zone_device_unregister);
1227 1213
1214#ifdef CONFIG_NET
1215static struct genl_family thermal_event_genl_family = {
1216 .id = GENL_ID_GENERATE,
1217 .name = THERMAL_GENL_FAMILY_NAME,
1218 .version = THERMAL_GENL_VERSION,
1219 .maxattr = THERMAL_GENL_ATTR_MAX,
1220};
1221
1222static struct genl_multicast_group thermal_event_mcgrp = {
1223 .name = THERMAL_GENL_MCAST_GROUP_NAME,
1224};
1225
1228int generate_netlink_event(u32 orig, enum events event) 1226int generate_netlink_event(u32 orig, enum events event)
1229{ 1227{
1230 struct sk_buff *skb; 1228 struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
1301 return result; 1299 return result;
1302} 1300}
1303 1301
1302static void genetlink_exit(void)
1303{
1304 genl_unregister_family(&thermal_event_genl_family);
1305}
1306#else /* !CONFIG_NET */
1307static inline int genetlink_init(void) { return 0; }
1308static inline void genetlink_exit(void) {}
1309#endif /* !CONFIG_NET */
1310
1304static int __init thermal_init(void) 1311static int __init thermal_init(void)
1305{ 1312{
1306 int result = 0; 1313 int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
1316 return result; 1323 return result;
1317} 1324}
1318 1325
1319static void genetlink_exit(void)
1320{
1321 genl_unregister_family(&thermal_event_genl_family);
1322}
1323
1324static void __exit thermal_exit(void) 1326static void __exit thermal_exit(void)
1325{ 1327{
1326 class_unregister(&thermal_class); 1328 class_unregister(&thermal_class);
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2ea172..1ef4df9bf7e4 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), 713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
715 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
715 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 716 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
716 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 717 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
717 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), 718 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c6826e43..0f299b7aad60 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2681 2681
2682 mutex_lock(&usb_address0_mutex); 2682 mutex_lock(&usb_address0_mutex);
2683 2683
2684 if (!udev->config && oldspeed == USB_SPEED_SUPER) { 2684 /* Reset the device; full speed may morph to high speed */
2685 /* Don't reset USB 3.0 devices during an initial setup */ 2685 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2686 usb_set_device_state(udev, USB_STATE_DEFAULT); 2686 retval = hub_port_reset(hub, port1, udev, delay);
2687 } else { 2687 if (retval < 0) /* error or disconnect */
2688 /* Reset the device; full speed may morph to high speed */ 2688 goto fail;
2689 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ 2689 /* success, speed is known */
2690 retval = hub_port_reset(hub, port1, udev, delay); 2690
2691 if (retval < 0) /* error or disconnect */
2692 goto fail;
2693 /* success, speed is known */
2694 }
2695 retval = -ENODEV; 2691 retval = -ENODEV;
2696 2692
2697 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { 2693 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c595432d6f..81ce6a8e1d94 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
48 { USB_DEVICE(0x04b4, 0x0526), .driver_info = 48 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
49 USB_QUIRK_CONFIG_INTF_STRINGS }, 49 USB_QUIRK_CONFIG_INTF_STRINGS },
50 50
51 /* Samsung Android phone modem - ID conflict with SPH-I500 */
52 { USB_DEVICE(0x04e8, 0x6601), .driver_info =
53 USB_QUIRK_CONFIG_INTF_STRINGS },
54
51 /* Roland SC-8820 */ 55 /* Roland SC-8820 */
52 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 56 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
53 57
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
68 /* M-Systems Flash Disk Pioneers */ 72 /* M-Systems Flash Disk Pioneers */
69 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, 73 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
70 74
75 /* Keytouch QWERTY Panel keyboard */
76 { USB_DEVICE(0x0926, 0x3333), .driver_info =
77 USB_QUIRK_CONFIG_INTF_STRINGS },
78
71 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ 79 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
72 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, 80 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
73 81
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 3c6e1a058745..5e1495097ec3 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
346 346
347 if (unlikely(!skb)) 347 if (unlikely(!skb))
348 break; 348 break;
349 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
350 req->actual);
351 page = NULL;
352 349
353 if (req->actual < req->length) { /* Last fragment */ 350 if (skb->len == 0) { /* First fragment */
354 skb->protocol = htons(ETH_P_PHONET); 351 skb->protocol = htons(ETH_P_PHONET);
355 skb_reset_mac_header(skb); 352 skb_reset_mac_header(skb);
356 pskb_pull(skb, 1); 353 /* Can't use pskb_pull() on page in IRQ */
354 memcpy(skb_put(skb, 1), page_address(page), 1);
355 }
356
357 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
358 skb->len == 0, req->actual);
359 page = NULL;
360
361 if (req->actual < req->length) { /* Last fragment */
357 skb->dev = dev; 362 skb->dev = dev;
358 dev->stats.rx_packets++; 363 dev->stats.rx_packets++;
359 dev->stats.rx_bytes += skb->len; 364 dev->stats.rx_bytes += skb->len;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36fdf0b..a6f21b891f68 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/of.h> 30#include <linux/of.h>
31#include <linux/of_platform.h> 31#include <linux/of_platform.h>
32#include <linux/of_address.h>
32 33
33/** 34/**
34 * ehci_xilinx_of_setup - Initialize the device for ehci_reset() 35 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
169 } 169 }
170} 170}
171 171
172void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num) 172void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
173{ 173{
174 void *addr; 174 struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
175 void __iomem *addr;
175 u32 temp; 176 u32 temp;
176 u64 temp_64; 177 u64 temp_64;
177 178
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
449 } 450 }
450} 451}
451 452
452void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) 453static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
453{ 454{
454 /* Fields are 32 bits wide, DMA addresses are in bytes */ 455 /* Fields are 32 bits wide, DMA addresses are in bytes */
455 int field_size = 32 / 8; 456 int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
488 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); 489 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
489} 490}
490 491
491void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, 492static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
492 struct xhci_container_ctx *ctx, 493 struct xhci_container_ctx *ctx,
493 unsigned int last_ep) 494 unsigned int last_ep)
494{ 495{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
307 307
308/***************** Streams structures manipulation *************************/ 308/***************** Streams structures manipulation *************************/
309 309
310void xhci_free_stream_ctx(struct xhci_hcd *xhci, 310static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
311 unsigned int num_stream_ctxs, 311 unsigned int num_stream_ctxs,
312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) 312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
313{ 313{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
335 * The stream context array must be a power of 2, and can be as small as 335 * The stream context array must be a power of 2, and can be as small as
336 * 64 bytes or as large as 1MB. 336 * 64 bytes or as large as 1MB.
337 */ 337 */
338struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, 338static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
339 unsigned int num_stream_ctxs, dma_addr_t *dma, 339 unsigned int num_stream_ctxs, dma_addr_t *dma,
340 gfp_t mem_flags) 340 gfp_t mem_flags)
341{ 341{
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1900 val &= DBOFF_MASK; 1900 val &= DBOFF_MASK;
1901 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 1901 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
1902 " from cap regs base addr\n", val); 1902 " from cap regs base addr\n", val);
1903 xhci->dba = (void *) xhci->cap_regs + val; 1903 xhci->dba = (void __iomem *) xhci->cap_regs + val;
1904 xhci_dbg_regs(xhci); 1904 xhci_dbg_regs(xhci);
1905 xhci_print_run_regs(xhci); 1905 xhci_print_run_regs(xhci);
1906 /* Set ir_set to interrupt register set 0 */ 1906 /* Set ir_set to interrupt register set 0 */
1907 xhci->ir_set = (void *) xhci->run_regs->ir_set; 1907 xhci->ir_set = &xhci->run_regs->ir_set[0];
1908 1908
1909 /* 1909 /*
1910 * Event ring setup: Allocate a normal ring, but also setup 1910 * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1961 /* Set the event ring dequeue address */ 1961 /* Set the event ring dequeue address */
1962 xhci_set_hc_event_deq(xhci); 1962 xhci_set_hc_event_deq(xhci);
1963 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 1963 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
1964 xhci_print_ir_set(xhci, xhci->ir_set, 0); 1964 xhci_print_ir_set(xhci, 0);
1965 1965
1966 /* 1966 /*
1967 * XXX: Might need to set the Interrupter Moderation Register to 1967 * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
474 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 474 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
475 dev->eps[ep_index].stopped_trb, 475 dev->eps[ep_index].stopped_trb,
476 &state->new_cycle_state); 476 &state->new_cycle_state);
477 if (!state->new_deq_seg) 477 if (!state->new_deq_seg) {
478 BUG(); 478 WARN_ON(1);
479 return;
480 }
481
479 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 482 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
480 xhci_dbg(xhci, "Finding endpoint context\n"); 483 xhci_dbg(xhci, "Finding endpoint context\n");
481 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 484 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
486 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 489 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
487 state->new_deq_ptr, 490 state->new_deq_ptr,
488 &state->new_cycle_state); 491 &state->new_cycle_state);
489 if (!state->new_deq_seg) 492 if (!state->new_deq_seg) {
490 BUG(); 493 WARN_ON(1);
494 return;
495 }
491 496
492 trb = &state->new_deq_ptr->generic; 497 trb = &state->new_deq_ptr->generic;
493 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && 498 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2363 2368
2364 /* Scatter gather list entries may cross 64KB boundaries */ 2369 /* Scatter gather list entries may cross 64KB boundaries */
2365 running_total = TRB_MAX_BUFF_SIZE - 2370 running_total = TRB_MAX_BUFF_SIZE -
2366 (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2371 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2372 running_total &= TRB_MAX_BUFF_SIZE - 1;
2367 if (running_total != 0) 2373 if (running_total != 0)
2368 num_trbs++; 2374 num_trbs++;
2369 2375
2370 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2376 /* How many more 64KB chunks to transfer, how many more TRBs? */
2371 while (running_total < sg_dma_len(sg)) { 2377 while (running_total < sg_dma_len(sg) && running_total < temp) {
2372 num_trbs++; 2378 num_trbs++;
2373 running_total += TRB_MAX_BUFF_SIZE; 2379 running_total += TRB_MAX_BUFF_SIZE;
2374 } 2380 }
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2394static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2400static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2395{ 2401{
2396 if (num_trbs != 0) 2402 if (num_trbs != 0)
2397 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2403 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2398 "TRBs, %d left\n", __func__, 2404 "TRBs, %d left\n", __func__,
2399 urb->ep->desc.bEndpointAddress, num_trbs); 2405 urb->ep->desc.bEndpointAddress, num_trbs);
2400 if (running_total != urb->transfer_buffer_length) 2406 if (running_total != urb->transfer_buffer_length)
2401 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2407 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2402 "queued %#x (%d), asked for %#x (%d)\n", 2408 "queued %#x (%d), asked for %#x (%d)\n",
2403 __func__, 2409 __func__,
2404 urb->ep->desc.bEndpointAddress, 2410 urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2533 sg = urb->sg; 2539 sg = urb->sg;
2534 addr = (u64) sg_dma_address(sg); 2540 addr = (u64) sg_dma_address(sg);
2535 this_sg_len = sg_dma_len(sg); 2541 this_sg_len = sg_dma_len(sg);
2536 trb_buff_len = TRB_MAX_BUFF_SIZE - 2542 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2537 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2538 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2543 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2539 if (trb_buff_len > urb->transfer_buffer_length) 2544 if (trb_buff_len > urb->transfer_buffer_length)
2540 trb_buff_len = urb->transfer_buffer_length; 2545 trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2572 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2577 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2573 (unsigned int) addr + trb_buff_len); 2578 (unsigned int) addr + trb_buff_len);
2574 if (TRB_MAX_BUFF_SIZE - 2579 if (TRB_MAX_BUFF_SIZE -
2575 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { 2580 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
2576 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 2581 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2577 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 2582 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2578 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2583 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2616 } 2621 }
2617 2622
2618 trb_buff_len = TRB_MAX_BUFF_SIZE - 2623 trb_buff_len = TRB_MAX_BUFF_SIZE -
2619 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2624 (addr & (TRB_MAX_BUFF_SIZE - 1));
2620 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2625 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2621 if (running_total + trb_buff_len > urb->transfer_buffer_length) 2626 if (running_total + trb_buff_len > urb->transfer_buffer_length)
2622 trb_buff_len = 2627 trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2656 num_trbs = 0; 2661 num_trbs = 0;
2657 /* How much data is (potentially) left before the 64KB boundary? */ 2662 /* How much data is (potentially) left before the 64KB boundary? */
2658 running_total = TRB_MAX_BUFF_SIZE - 2663 running_total = TRB_MAX_BUFF_SIZE -
2659 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2664 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2665 running_total &= TRB_MAX_BUFF_SIZE - 1;
2660 2666
2661 /* If there's some data on this 64KB chunk, or we have to send a 2667 /* If there's some data on this 64KB chunk, or we have to send a
2662 * zero-length transfer, we need at least one TRB 2668 * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2700 /* How much data is in the first TRB? */ 2706 /* How much data is in the first TRB? */
2701 addr = (u64) urb->transfer_dma; 2707 addr = (u64) urb->transfer_dma;
2702 trb_buff_len = TRB_MAX_BUFF_SIZE - 2708 trb_buff_len = TRB_MAX_BUFF_SIZE -
2703 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2709 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2704 if (urb->transfer_buffer_length < trb_buff_len) 2710 if (trb_buff_len > urb->transfer_buffer_length)
2705 trb_buff_len = urb->transfer_buffer_length; 2711 trb_buff_len = urb->transfer_buffer_length;
2706 2712
2707 first_trb = true; 2713 first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
2879 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 2885 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
2880 td_len = urb->iso_frame_desc[i].length; 2886 td_len = urb->iso_frame_desc[i].length;
2881 2887
2882 running_total = TRB_MAX_BUFF_SIZE - 2888 running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2883 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2889 running_total &= TRB_MAX_BUFF_SIZE - 1;
2884 if (running_total != 0) 2890 if (running_total != 0)
2885 num_trbs++; 2891 num_trbs++;
2886 2892
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e165877..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
109/* 109/*
110 * Set the run bit and wait for the host to be running. 110 * Set the run bit and wait for the host to be running.
111 */ 111 */
112int xhci_start(struct xhci_hcd *xhci) 112static int xhci_start(struct xhci_hcd *xhci)
113{ 113{
114 u32 temp; 114 u32 temp;
115 int ret; 115 int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
329 329
330 330
331#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 331#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
332void xhci_event_ring_work(unsigned long arg) 332static void xhci_event_ring_work(unsigned long arg)
333{ 333{
334 unsigned long flags; 334 unsigned long flags;
335 int temp; 335 int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
473 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); 473 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
474 xhci_writel(xhci, ER_IRQ_ENABLE(temp), 474 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
475 &xhci->ir_set->irq_pending); 475 &xhci->ir_set->irq_pending);
476 xhci_print_ir_set(xhci, xhci->ir_set, 0); 476 xhci_print_ir_set(xhci, 0);
477 477
478 if (NUM_TEST_NOOPS > 0) 478 if (NUM_TEST_NOOPS > 0)
479 doorbell = xhci_setup_one_noop(xhci); 479 doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
528 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 528 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
529 xhci_writel(xhci, ER_IRQ_DISABLE(temp), 529 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
530 &xhci->ir_set->irq_pending); 530 &xhci->ir_set->irq_pending);
531 xhci_print_ir_set(xhci, xhci->ir_set, 0); 531 xhci_print_ir_set(xhci, 0);
532 532
533 xhci_dbg(xhci, "cleaning up memory\n"); 533 xhci_dbg(xhci, "cleaning up memory\n");
534 xhci_mem_cleanup(xhci); 534 xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
755 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 755 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
756 xhci_writel(xhci, ER_IRQ_DISABLE(temp), 756 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
757 &xhci->ir_set->irq_pending); 757 &xhci->ir_set->irq_pending);
758 xhci_print_ir_set(xhci, xhci->ir_set, 0); 758 xhci_print_ir_set(xhci, 0);
759 759
760 xhci_dbg(xhci, "cleaning up memory\n"); 760 xhci_dbg(xhci, "cleaning up memory\n");
761 xhci_mem_cleanup(xhci); 761 xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
857/* Returns 1 if the arguments are OK; 857/* Returns 1 if the arguments are OK;
858 * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 858 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
859 */ 859 */
860int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, 860static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
861 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, 861 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
862 const char *func) { 862 const char *func) {
863 struct xhci_hcd *xhci; 863 struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1693 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 1693 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1694} 1694}
1695 1695
1696void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 1696static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1697 unsigned int slot_id, unsigned int ep_index, 1697 unsigned int slot_id, unsigned int ep_index,
1698 struct xhci_dequeue_state *deq_state) 1698 struct xhci_dequeue_state *deq_state)
1699{ 1699{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd22015..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
1348} 1348}
1349 1349
1350/* xHCI debugging */ 1350/* xHCI debugging */
1351void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); 1351void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
1352void xhci_print_registers(struct xhci_hcd *xhci); 1352void xhci_print_registers(struct xhci_hcd *xhci);
1353void xhci_dbg_regs(struct xhci_hcd *xhci); 1353void xhci_dbg_regs(struct xhci_hcd *xhci);
1354void xhci_print_run_regs(struct xhci_hcd *xhci); 1354void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1047d6..c292d5c499e7 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
1864 INIT_LIST_HEAD(&musb->out_bulk); 1864 INIT_LIST_HEAD(&musb->out_bulk);
1865 1865
1866 hcd->uses_new_polling = 1; 1866 hcd->uses_new_polling = 1;
1867 hcd->has_tt = 1;
1867 1868
1868 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1869 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1869 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; 1870 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d74a8113ae74..e6400be8a0f8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -488,6 +488,15 @@ struct musb {
488 unsigned set_address:1; 488 unsigned set_address:1;
489 unsigned test_mode:1; 489 unsigned test_mode:1;
490 unsigned softconnect:1; 490 unsigned softconnect:1;
491
492 u8 address;
493 u8 test_mode_nr;
494 u16 ackpend; /* ep0 */
495 enum musb_g_ep0_state ep0_state;
496 struct usb_gadget g; /* the gadget */
497 struct usb_gadget_driver *gadget_driver; /* its driver */
498#endif
499
491 /* 500 /*
492 * FIXME: Remove this flag. 501 * FIXME: Remove this flag.
493 * 502 *
@@ -501,14 +510,6 @@ struct musb {
501 */ 510 */
502 unsigned double_buffer_not_ok:1 __deprecated; 511 unsigned double_buffer_not_ok:1 __deprecated;
503 512
504 u8 address;
505 u8 test_mode_nr;
506 u16 ackpend; /* ep0 */
507 enum musb_g_ep0_state ep0_state;
508 struct usb_gadget g; /* the gadget */
509 struct usb_gadget_driver *gadget_driver; /* its driver */
510#endif
511
512 struct musb_hdrc_config *config; 513 struct musb_hdrc_config *config;
513 514
514#ifdef MUSB_CONFIG_PROC_FS 515#ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3f12333fc41..bc8badd16897 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
362 362
363static int omap2430_musb_exit(struct musb *musb) 363static int omap2430_musb_exit(struct musb *musb)
364{ 364{
365 del_timer_sync(&musb_idle_timer);
365 366
366 omap2430_low_level_exit(musb); 367 omap2430_low_level_exit(musb);
367 otg_put_transceiver(musb->xceiv); 368 otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 7481ff8a49e4..0457813eebee 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
301 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 301 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
302 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 302 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
303 }, 303 },
304 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
305 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
306 },
304 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ 307 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
305 308
306 { } 309 { }
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b004b2a485c3..9c014e2ecd68 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
295 __func__, status, endpoint); 295 __func__, status, endpoint);
296 } else { 296 } else {
297 tty = tty_port_tty_get(&port->port); 297 tty = tty_port_tty_get(&port->port);
298 if (urb->actual_length) { 298 if (tty) {
299 tty_insert_flip_string(tty, data, urb->actual_length); 299 if (urb->actual_length) {
300 tty_flip_buffer_push(tty); 300 tty_insert_flip_string(tty, data,
301 } else 301 urb->actual_length);
302 dbg("%s: empty read urb received", __func__); 302 tty_flip_buffer_push(tty);
303 tty_kref_put(tty); 303 } else
304 dbg("%s: empty read urb received", __func__);
305 tty_kref_put(tty);
306 }
304 307
305 /* Resubmit urb so we continue receiving */ 308 /* Resubmit urb so we continue receiving */
306 if (status != -ESHUTDOWN) { 309 if (status != -ESHUTDOWN) {
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89b7f39..1c11959a7d58 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28#include <linux/usb.h> 28#include <linux/usb.h>
29#include <linux/usb/serial.h> 29#include <linux/usb/serial.h>
30#include <linux/usb/cdc.h>
30#include "visor.h" 31#include "visor.h"
31 32
32/* 33/*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
479 480
480 dbg("%s", __func__); 481 dbg("%s", __func__);
481 482
483 /*
484 * some Samsung Android phones in modem mode have the same ID
485 * as SPH-I500, but they are ACM devices, so dont bind to them
486 */
487 if (id->idVendor == SAMSUNG_VENDOR_ID &&
488 id->idProduct == SAMSUNG_SPH_I500_ID &&
489 serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
490 serial->dev->descriptor.bDeviceSubClass ==
491 USB_CDC_SUBCLASS_ACM)
492 return -ENODEV;
493
482 if (serial->dev->actconfig->desc.bConfigurationValue != 1) { 494 if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
483 dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", 495 dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
484 serial->dev->actconfig->desc.bConfigurationValue); 496 serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 52ec0959d462..5180a215d781 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
73 struct uvesafb_task *utask; 73 struct uvesafb_task *utask;
74 struct uvesafb_ktask *task; 74 struct uvesafb_ktask *task;
75 75
76 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) 76 if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
77 return; 77 return;
78 78
79 if (msg->seq >= UVESAFB_TASKS_MAX) 79 if (msg->seq >= UVESAFB_TASKS_MAX)
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 15690bb1d3b5..789b3afb3423 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
140 candidate->first = candidate->last = index; 140 candidate->first = candidate->last = index;
141 candidate->offset_first = from; 141 candidate->offset_first = from;
142 candidate->to_last = to; 142 candidate->to_last = to;
143 INIT_LIST_HEAD(&candidate->link);
143 candidate->usage = 1; 144 candidate->usage = 1;
144 candidate->state = AFS_WBACK_PENDING; 145 candidate->state = AFS_WBACK_PENDING;
145 init_waitqueue_head(&candidate->waitq); 146 init_waitqueue_head(&candidate->waitq);
diff --git a/fs/aio.c b/fs/aio.c
index fc557a3be0a9..26869cde3953 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx)
239 call_rcu(&ctx->rcu_head, ctx_rcu_free); 239 call_rcu(&ctx->rcu_head, ctx_rcu_free);
240} 240}
241 241
242#define get_ioctx(kioctx) do { \ 242static inline void get_ioctx(struct kioctx *kioctx)
243 BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ 243{
244 atomic_inc(&(kioctx)->users); \ 244 BUG_ON(atomic_read(&kioctx->users) <= 0);
245} while (0) 245 atomic_inc(&kioctx->users);
246#define put_ioctx(kioctx) do { \ 246}
247 BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ 247
248 if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ 248static inline int try_get_ioctx(struct kioctx *kioctx)
249 __put_ioctx(kioctx); \ 249{
250} while (0) 250 return atomic_inc_not_zero(&kioctx->users);
251}
252
253static inline void put_ioctx(struct kioctx *kioctx)
254{
255 BUG_ON(atomic_read(&kioctx->users) <= 0);
256 if (unlikely(atomic_dec_and_test(&kioctx->users)))
257 __put_ioctx(kioctx);
258}
251 259
252/* ioctx_alloc 260/* ioctx_alloc
253 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. 261 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
601 rcu_read_lock(); 609 rcu_read_lock();
602 610
603 hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { 611 hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
604 if (ctx->user_id == ctx_id && !ctx->dead) { 612 /*
605 get_ioctx(ctx); 613 * RCU protects us against accessing freed memory but
614 * we have to be careful not to get a reference when the
615 * reference count already dropped to 0 (ctx->dead test
616 * is unreliable because of races).
617 */
618 if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
606 ret = ctx; 619 ret = ctx;
607 break; 620 break;
608 } 621 }
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1629 goto out_put_req; 1642 goto out_put_req;
1630 1643
1631 spin_lock_irq(&ctx->ctx_lock); 1644 spin_lock_irq(&ctx->ctx_lock);
1645 /*
1646 * We could have raced with io_destroy() and are currently holding a
1647 * reference to ctx which should be destroyed. We cannot submit IO
1648 * since ctx gets freed as soon as io_submit() puts its reference. The
1649 * check here is reliable: io_destroy() sets ctx->dead before waiting
1650 * for outstanding IO and the barrier between these two is realized by
1651 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
1652 * increment ctx->reqs_active before checking for ctx->dead and the
1653 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
1654 * don't see ctx->dead set here, io_destroy() waits for our IO to
1655 * finish.
1656 */
1657 if (ctx->dead) {
1658 spin_unlock_irq(&ctx->ctx_lock);
1659 ret = -EINVAL;
1660 goto out_put_req;
1661 }
1632 aio_run_iocb(req); 1662 aio_run_iocb(req);
1633 if (!list_empty(&ctx->run_list)) { 1663 if (!list_empty(&ctx->run_list)) {
1634 /* drain the run list */ 1664 /* drain the run list */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4fb8a3431531..889287019599 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
873 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); 873 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
874 if (ret) 874 if (ret)
875 goto out_del; 875 goto out_del;
876 /*
877 * bdev could be deleted beneath us which would implicitly destroy
878 * the holder directory. Hold on to it.
879 */
880 kobject_get(bdev->bd_part->holder_dir);
876 881
877 list_add(&holder->list, &bdev->bd_holder_disks); 882 list_add(&holder->list, &bdev->bd_holder_disks);
878 goto out_unlock; 883 goto out_unlock;
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
909 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); 914 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
910 del_symlink(bdev->bd_part->holder_dir, 915 del_symlink(bdev->bd_part->holder_dir,
911 &disk_to_dev(disk)->kobj); 916 &disk_to_dev(disk)->kobj);
917 kobject_put(bdev->bd_part->holder_dir);
912 list_del_init(&holder->list); 918 list_del_init(&holder->list);
913 kfree(holder); 919 kfree(holder);
914 } 920 }
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
922 * flush_disk - invalidates all buffer-cache entries on a disk 928 * flush_disk - invalidates all buffer-cache entries on a disk
923 * 929 *
924 * @bdev: struct block device to be flushed 930 * @bdev: struct block device to be flushed
931 * @kill_dirty: flag to guide handling of dirty inodes
925 * 932 *
926 * Invalidates all buffer-cache entries on a disk. It should be called 933 * Invalidates all buffer-cache entries on a disk. It should be called
927 * when a disk has been changed -- either by a media change or online 934 * when a disk has been changed -- either by a media change or online
928 * resize. 935 * resize.
929 */ 936 */
930static void flush_disk(struct block_device *bdev) 937static void flush_disk(struct block_device *bdev, bool kill_dirty)
931{ 938{
932 if (__invalidate_device(bdev)) { 939 if (__invalidate_device(bdev, kill_dirty)) {
933 char name[BDEVNAME_SIZE] = ""; 940 char name[BDEVNAME_SIZE] = "";
934 941
935 if (bdev->bd_disk) 942 if (bdev->bd_disk)
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
966 "%s: detected capacity change from %lld to %lld\n", 973 "%s: detected capacity change from %lld to %lld\n",
967 name, bdev_size, disk_size); 974 name, bdev_size, disk_size);
968 i_size_write(bdev->bd_inode, disk_size); 975 i_size_write(bdev->bd_inode, disk_size);
969 flush_disk(bdev); 976 flush_disk(bdev, false);
970 } 977 }
971} 978}
972EXPORT_SYMBOL(check_disk_size_change); 979EXPORT_SYMBOL(check_disk_size_change);
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev)
1019 if (!(events & DISK_EVENT_MEDIA_CHANGE)) 1026 if (!(events & DISK_EVENT_MEDIA_CHANGE))
1020 return 0; 1027 return 0;
1021 1028
1022 flush_disk(bdev); 1029 flush_disk(bdev, true);
1023 if (bdops->revalidate_disk) 1030 if (bdops->revalidate_disk)
1024 bdops->revalidate_disk(bdev->bd_disk); 1031 bdops->revalidate_disk(bdev->bd_disk);
1025 return 1; 1032 return 1;
@@ -1600,7 +1607,7 @@ fail:
1600} 1607}
1601EXPORT_SYMBOL(lookup_bdev); 1608EXPORT_SYMBOL(lookup_bdev);
1602 1609
1603int __invalidate_device(struct block_device *bdev) 1610int __invalidate_device(struct block_device *bdev, bool kill_dirty)
1604{ 1611{
1605 struct super_block *sb = get_super(bdev); 1612 struct super_block *sb = get_super(bdev);
1606 int res = 0; 1613 int res = 0;
@@ -1613,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev)
1613 * hold). 1620 * hold).
1614 */ 1621 */
1615 shrink_dcache_sb(sb); 1622 shrink_dcache_sb(sb);
1616 res = invalidate_inodes(sb); 1623 res = invalidate_inodes(sb, kill_dirty);
1617 drop_super(sb); 1624 drop_super(sb);
1618 } 1625 }
1619 invalidate_bdev(bdev); 1626 invalidate_bdev(bdev);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2c98b3af6052..6f820fa23df4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1254,6 +1254,7 @@ struct btrfs_root {
1254#define BTRFS_MOUNT_SPACE_CACHE (1 << 12) 1254#define BTRFS_MOUNT_SPACE_CACHE (1 << 12)
1255#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) 1255#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
1256#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 1256#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
1257#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
1257 1258
1258#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1259#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1259#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1260#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
2218 u64 start, u64 end); 2219 u64 start, u64 end);
2219int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 2220int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
2220 u64 num_bytes); 2221 u64 num_bytes);
2222int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
2223 struct btrfs_root *root, u64 type);
2221 2224
2222/* ctree.c */ 2225/* ctree.c */
2223int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 2226int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f3c96fc01439..588ff9849873 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5376,7 +5376,7 @@ again:
5376 num_bytes, data, 1); 5376 num_bytes, data, 1);
5377 goto again; 5377 goto again;
5378 } 5378 }
5379 if (ret == -ENOSPC) { 5379 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
5380 struct btrfs_space_info *sinfo; 5380 struct btrfs_space_info *sinfo;
5381 5381
5382 sinfo = __find_space_info(root->fs_info, data); 5382 sinfo = __find_space_info(root->fs_info, data);
@@ -8065,6 +8065,13 @@ out:
8065 return ret; 8065 return ret;
8066} 8066}
8067 8067
8068int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8069 struct btrfs_root *root, u64 type)
8070{
8071 u64 alloc_flags = get_alloc_profile(root, type);
8072 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
8073}
8074
8068/* 8075/*
8069 * helper to account the unused space of all the readonly block group in the 8076 * helper to account the unused space of all the readonly block group in the
8070 * list. takes mirrors into account. 8077 * list. takes mirrors into account.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 92ac5192c518..fd3f172e94e6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
1433 */ 1433 */
1434u64 count_range_bits(struct extent_io_tree *tree, 1434u64 count_range_bits(struct extent_io_tree *tree,
1435 u64 *start, u64 search_end, u64 max_bytes, 1435 u64 *start, u64 search_end, u64 max_bytes,
1436 unsigned long bits) 1436 unsigned long bits, int contig)
1437{ 1437{
1438 struct rb_node *node; 1438 struct rb_node *node;
1439 struct extent_state *state; 1439 struct extent_state *state;
1440 u64 cur_start = *start; 1440 u64 cur_start = *start;
1441 u64 total_bytes = 0; 1441 u64 total_bytes = 0;
1442 u64 last = 0;
1442 int found = 0; 1443 int found = 0;
1443 1444
1444 if (search_end <= cur_start) { 1445 if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
1463 state = rb_entry(node, struct extent_state, rb_node); 1464 state = rb_entry(node, struct extent_state, rb_node);
1464 if (state->start > search_end) 1465 if (state->start > search_end)
1465 break; 1466 break;
1466 if (state->end >= cur_start && (state->state & bits)) { 1467 if (contig && found && state->start > last + 1)
1468 break;
1469 if (state->end >= cur_start && (state->state & bits) == bits) {
1467 total_bytes += min(search_end, state->end) + 1 - 1470 total_bytes += min(search_end, state->end) + 1 -
1468 max(cur_start, state->start); 1471 max(cur_start, state->start);
1469 if (total_bytes >= max_bytes) 1472 if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
1472 *start = state->start; 1475 *start = state->start;
1473 found = 1; 1476 found = 1;
1474 } 1477 }
1478 last = state->end;
1479 } else if (contig && found) {
1480 break;
1475 } 1481 }
1476 node = rb_next(node); 1482 node = rb_next(node);
1477 if (!node) 1483 if (!node)
@@ -2912,6 +2918,46 @@ out:
2912 return sector; 2918 return sector;
2913} 2919}
2914 2920
2921/*
2922 * helper function for fiemap, which doesn't want to see any holes.
2923 * This maps until we find something past 'last'
2924 */
2925static struct extent_map *get_extent_skip_holes(struct inode *inode,
2926 u64 offset,
2927 u64 last,
2928 get_extent_t *get_extent)
2929{
2930 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2931 struct extent_map *em;
2932 u64 len;
2933
2934 if (offset >= last)
2935 return NULL;
2936
2937 while(1) {
2938 len = last - offset;
2939 if (len == 0)
2940 break;
2941 len = (len + sectorsize - 1) & ~(sectorsize - 1);
2942 em = get_extent(inode, NULL, 0, offset, len, 0);
2943 if (!em || IS_ERR(em))
2944 return em;
2945
2946 /* if this isn't a hole return it */
2947 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2948 em->block_start != EXTENT_MAP_HOLE) {
2949 return em;
2950 }
2951
2952 /* this is a hole, advance to the next extent */
2953 offset = extent_map_end(em);
2954 free_extent_map(em);
2955 if (offset >= last)
2956 break;
2957 }
2958 return NULL;
2959}
2960
2915int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 2961int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2916 __u64 start, __u64 len, get_extent_t *get_extent) 2962 __u64 start, __u64 len, get_extent_t *get_extent)
2917{ 2963{
@@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2921 u32 flags = 0; 2967 u32 flags = 0;
2922 u32 found_type; 2968 u32 found_type;
2923 u64 last; 2969 u64 last;
2970 u64 last_for_get_extent = 0;
2924 u64 disko = 0; 2971 u64 disko = 0;
2972 u64 isize = i_size_read(inode);
2925 struct btrfs_key found_key; 2973 struct btrfs_key found_key;
2926 struct extent_map *em = NULL; 2974 struct extent_map *em = NULL;
2927 struct extent_state *cached_state = NULL; 2975 struct extent_state *cached_state = NULL;
2928 struct btrfs_path *path; 2976 struct btrfs_path *path;
2929 struct btrfs_file_extent_item *item; 2977 struct btrfs_file_extent_item *item;
2930 int end = 0; 2978 int end = 0;
2931 u64 em_start = 0, em_len = 0; 2979 u64 em_start = 0;
2980 u64 em_len = 0;
2981 u64 em_end = 0;
2932 unsigned long emflags; 2982 unsigned long emflags;
2933 int hole = 0;
2934 2983
2935 if (len == 0) 2984 if (len == 0)
2936 return -EINVAL; 2985 return -EINVAL;
@@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2940 return -ENOMEM; 2989 return -ENOMEM;
2941 path->leave_spinning = 1; 2990 path->leave_spinning = 1;
2942 2991
2992 /*
2993 * lookup the last file extent. We're not using i_size here
2994 * because there might be preallocation past i_size
2995 */
2943 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, 2996 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2944 path, inode->i_ino, -1, 0); 2997 path, inode->i_ino, -1, 0);
2945 if (ret < 0) { 2998 if (ret < 0) {
@@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2953 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 3006 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2954 found_type = btrfs_key_type(&found_key); 3007 found_type = btrfs_key_type(&found_key);
2955 3008
2956 /* No extents, just return */ 3009 /* No extents, but there might be delalloc bits */
2957 if (found_key.objectid != inode->i_ino || 3010 if (found_key.objectid != inode->i_ino ||
2958 found_type != BTRFS_EXTENT_DATA_KEY) { 3011 found_type != BTRFS_EXTENT_DATA_KEY) {
2959 btrfs_free_path(path); 3012 /* have to trust i_size as the end */
2960 return 0; 3013 last = (u64)-1;
3014 last_for_get_extent = isize;
3015 } else {
3016 /*
3017 * remember the start of the last extent. There are a
3018 * bunch of different factors that go into the length of the
3019 * extent, so its much less complex to remember where it started
3020 */
3021 last = found_key.offset;
3022 last_for_get_extent = last + 1;
2961 } 3023 }
2962 last = found_key.offset;
2963 btrfs_free_path(path); 3024 btrfs_free_path(path);
2964 3025
3026 /*
3027 * we might have some extents allocated but more delalloc past those
3028 * extents. so, we trust isize unless the start of the last extent is
3029 * beyond isize
3030 */
3031 if (last < isize) {
3032 last = (u64)-1;
3033 last_for_get_extent = isize;
3034 }
3035
2965 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3036 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2966 &cached_state, GFP_NOFS); 3037 &cached_state, GFP_NOFS);
2967 em = get_extent(inode, NULL, 0, off, max - off, 0); 3038
3039 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3040 get_extent);
2968 if (!em) 3041 if (!em)
2969 goto out; 3042 goto out;
2970 if (IS_ERR(em)) { 3043 if (IS_ERR(em)) {
@@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2973 } 3046 }
2974 3047
2975 while (!end) { 3048 while (!end) {
2976 hole = 0; 3049 off = extent_map_end(em);
2977 off = em->start + em->len;
2978 if (off >= max) 3050 if (off >= max)
2979 end = 1; 3051 end = 1;
2980 3052
2981 if (em->block_start == EXTENT_MAP_HOLE) {
2982 hole = 1;
2983 goto next;
2984 }
2985
2986 em_start = em->start; 3053 em_start = em->start;
2987 em_len = em->len; 3054 em_len = em->len;
2988 3055 em_end = extent_map_end(em);
3056 emflags = em->flags;
2989 disko = 0; 3057 disko = 0;
2990 flags = 0; 3058 flags = 0;
2991 3059
@@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3004 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 3072 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3005 flags |= FIEMAP_EXTENT_ENCODED; 3073 flags |= FIEMAP_EXTENT_ENCODED;
3006 3074
3007next:
3008 emflags = em->flags;
3009 free_extent_map(em); 3075 free_extent_map(em);
3010 em = NULL; 3076 em = NULL;
3011 if (!end) { 3077 if ((em_start >= last) || em_len == (u64)-1 ||
3012 em = get_extent(inode, NULL, 0, off, max - off, 0); 3078 (last == (u64)-1 && isize <= em_end)) {
3013 if (!em)
3014 goto out;
3015 if (IS_ERR(em)) {
3016 ret = PTR_ERR(em);
3017 goto out;
3018 }
3019 emflags = em->flags;
3020 }
3021
3022 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
3023 flags |= FIEMAP_EXTENT_LAST; 3079 flags |= FIEMAP_EXTENT_LAST;
3024 end = 1; 3080 end = 1;
3025 } 3081 }
3026 3082
3027 if (em_start == last) { 3083 /* now scan forward to see if this is really the last extent. */
3084 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3085 get_extent);
3086 if (IS_ERR(em)) {
3087 ret = PTR_ERR(em);
3088 goto out;
3089 }
3090 if (!em) {
3028 flags |= FIEMAP_EXTENT_LAST; 3091 flags |= FIEMAP_EXTENT_LAST;
3029 end = 1; 3092 end = 1;
3030 } 3093 }
3031 3094 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3032 if (!hole) { 3095 em_len, flags);
3033 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 3096 if (ret)
3034 em_len, flags); 3097 goto out_free;
3035 if (ret)
3036 goto out_free;
3037 }
3038 } 3098 }
3039out_free: 3099out_free:
3040 free_extent_map(em); 3100 free_extent_map(em);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7083cfafd061..9318dfefd59c 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -191,7 +191,7 @@ void extent_io_exit(void);
191 191
192u64 count_range_bits(struct extent_io_tree *tree, 192u64 count_range_bits(struct extent_io_tree *tree,
193 u64 *start, u64 search_end, 193 u64 *start, u64 search_end,
194 u64 max_bytes, unsigned long bits); 194 u64 max_bytes, unsigned long bits, int contig);
195 195
196void free_extent_state(struct extent_state *state); 196void free_extent_state(struct extent_state *state);
197int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 197int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb9bd7832b6d..0efdb65953c5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1913 1913
1914 private = 0; 1914 private = 0;
1915 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, 1915 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1916 (u64)-1, 1, EXTENT_DIRTY)) { 1916 (u64)-1, 1, EXTENT_DIRTY, 0)) {
1917 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, 1917 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1918 start, &private_failure); 1918 start, &private_failure);
1919 if (ret == 0) { 1919 if (ret == 0) {
@@ -5280,6 +5280,128 @@ out:
5280 return em; 5280 return em;
5281} 5281}
5282 5282
5283struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5284 size_t pg_offset, u64 start, u64 len,
5285 int create)
5286{
5287 struct extent_map *em;
5288 struct extent_map *hole_em = NULL;
5289 u64 range_start = start;
5290 u64 end;
5291 u64 found;
5292 u64 found_end;
5293 int err = 0;
5294
5295 em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5296 if (IS_ERR(em))
5297 return em;
5298 if (em) {
5299 /*
5300 * if our em maps to a hole, there might
5301 * actually be delalloc bytes behind it
5302 */
5303 if (em->block_start != EXTENT_MAP_HOLE)
5304 return em;
5305 else
5306 hole_em = em;
5307 }
5308
5309 /* check to see if we've wrapped (len == -1 or similar) */
5310 end = start + len;
5311 if (end < start)
5312 end = (u64)-1;
5313 else
5314 end -= 1;
5315
5316 em = NULL;
5317
5318 /* ok, we didn't find anything, lets look for delalloc */
5319 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5320 end, len, EXTENT_DELALLOC, 1);
5321 found_end = range_start + found;
5322 if (found_end < range_start)
5323 found_end = (u64)-1;
5324
5325 /*
5326 * we didn't find anything useful, return
5327 * the original results from get_extent()
5328 */
5329 if (range_start > end || found_end <= start) {
5330 em = hole_em;
5331 hole_em = NULL;
5332 goto out;
5333 }
5334
5335 /* adjust the range_start to make sure it doesn't
5336 * go backwards from the start they passed in
5337 */
5338 range_start = max(start,range_start);
5339 found = found_end - range_start;
5340
5341 if (found > 0) {
5342 u64 hole_start = start;
5343 u64 hole_len = len;
5344
5345 em = alloc_extent_map(GFP_NOFS);
5346 if (!em) {
5347 err = -ENOMEM;
5348 goto out;
5349 }
5350 /*
5351 * when btrfs_get_extent can't find anything it
5352 * returns one huge hole
5353 *
5354 * make sure what it found really fits our range, and
5355 * adjust to make sure it is based on the start from
5356 * the caller
5357 */
5358 if (hole_em) {
5359 u64 calc_end = extent_map_end(hole_em);
5360
5361 if (calc_end <= start || (hole_em->start > end)) {
5362 free_extent_map(hole_em);
5363 hole_em = NULL;
5364 } else {
5365 hole_start = max(hole_em->start, start);
5366 hole_len = calc_end - hole_start;
5367 }
5368 }
5369 em->bdev = NULL;
5370 if (hole_em && range_start > hole_start) {
5371 /* our hole starts before our delalloc, so we
5372 * have to return just the parts of the hole
5373 * that go until the delalloc starts
5374 */
5375 em->len = min(hole_len,
5376 range_start - hole_start);
5377 em->start = hole_start;
5378 em->orig_start = hole_start;
5379 /*
5380 * don't adjust block start at all,
5381 * it is fixed at EXTENT_MAP_HOLE
5382 */
5383 em->block_start = hole_em->block_start;
5384 em->block_len = hole_len;
5385 } else {
5386 em->start = range_start;
5387 em->len = found;
5388 em->orig_start = range_start;
5389 em->block_start = EXTENT_MAP_DELALLOC;
5390 em->block_len = found;
5391 }
5392 } else if (hole_em) {
5393 return hole_em;
5394 }
5395out:
5396
5397 free_extent_map(hole_em);
5398 if (err) {
5399 free_extent_map(em);
5400 return ERR_PTR(err);
5401 }
5402 return em;
5403}
5404
5283static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 5405static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5284 u64 start, u64 len) 5406 u64 start, u64 len)
5285{ 5407{
@@ -6102,7 +6224,7 @@ out:
6102static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 6224static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6103 __u64 start, __u64 len) 6225 __u64 start, __u64 len)
6104{ 6226{
6105 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); 6227 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6106} 6228}
6107 6229
6108int btrfs_readpage(struct file *file, struct page *page) 6230int btrfs_readpage(struct file *file, struct page *page)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index be2d4f6aaa5e..5fdb2abc4fa7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1071 if (copy_from_user(&flags, arg, sizeof(flags))) 1071 if (copy_from_user(&flags, arg, sizeof(flags)))
1072 return -EFAULT; 1072 return -EFAULT;
1073 1073
1074 if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) 1074 if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
1075 return -EINVAL; 1075 return -EINVAL;
1076 1076
1077 if (flags & ~BTRFS_SUBVOL_RDONLY) 1077 if (flags & ~BTRFS_SUBVOL_RDONLY)
1078 return -EOPNOTSUPP; 1078 return -EOPNOTSUPP;
1079 1079
1080 if (!is_owner_or_cap(inode))
1081 return -EACCES;
1082
1080 down_write(&root->fs_info->subvol_sem); 1083 down_write(&root->fs_info->subvol_sem);
1081 1084
1082 /* nothing to do */ 1085 /* nothing to do */
@@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1097 goto out_reset; 1100 goto out_reset;
1098 } 1101 }
1099 1102
1100 ret = btrfs_update_root(trans, root, 1103 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1101 &root->root_key, &root->root_item); 1104 &root->root_key, &root->root_item);
1102 1105
1103 btrfs_commit_transaction(trans, root); 1106 btrfs_commit_transaction(trans, root);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index cc9b450399df..a178f5ebea78 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
280 unsigned long tot_out; 280 unsigned long tot_out;
281 unsigned long tot_len; 281 unsigned long tot_len;
282 char *buf; 282 char *buf;
283 bool may_late_unmap, need_unmap;
283 284
284 data_in = kmap(pages_in[0]); 285 data_in = kmap(pages_in[0]);
285 tot_len = read_compress_length(data_in); 286 tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
300 301
301 tot_in += in_len; 302 tot_in += in_len;
302 working_bytes = in_len; 303 working_bytes = in_len;
304 may_late_unmap = need_unmap = false;
303 305
304 /* fast path: avoid using the working buffer */ 306 /* fast path: avoid using the working buffer */
305 if (in_page_bytes_left >= in_len) { 307 if (in_page_bytes_left >= in_len) {
306 buf = data_in + in_offset; 308 buf = data_in + in_offset;
307 bytes = in_len; 309 bytes = in_len;
310 may_late_unmap = true;
308 goto cont; 311 goto cont;
309 } 312 }
310 313
@@ -329,14 +332,17 @@ cont:
329 if (working_bytes == 0 && tot_in >= tot_len) 332 if (working_bytes == 0 && tot_in >= tot_len)
330 break; 333 break;
331 334
332 kunmap(pages_in[page_in_index]); 335 if (page_in_index + 1 >= total_pages_in) {
333 page_in_index++;
334 if (page_in_index >= total_pages_in) {
335 ret = -1; 336 ret = -1;
336 data_in = NULL;
337 goto done; 337 goto done;
338 } 338 }
339 data_in = kmap(pages_in[page_in_index]); 339
340 if (may_late_unmap)
341 need_unmap = true;
342 else
343 kunmap(pages_in[page_in_index]);
344
345 data_in = kmap(pages_in[++page_in_index]);
340 346
341 in_page_bytes_left = PAGE_CACHE_SIZE; 347 in_page_bytes_left = PAGE_CACHE_SIZE;
342 in_offset = 0; 348 in_offset = 0;
@@ -346,6 +352,8 @@ cont:
346 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); 352 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
347 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, 353 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
348 &out_len); 354 &out_len);
355 if (need_unmap)
356 kunmap(pages_in[page_in_index - 1]);
349 if (ret != LZO_E_OK) { 357 if (ret != LZO_E_OK) {
350 printk(KERN_WARNING "btrfs decompress failed\n"); 358 printk(KERN_WARNING "btrfs decompress failed\n");
351 ret = -1; 359 ret = -1;
@@ -363,8 +371,7 @@ cont:
363 break; 371 break;
364 } 372 }
365done: 373done:
366 if (data_in) 374 kunmap(pages_in[page_in_index]);
367 kunmap(pages_in[page_in_index]);
368 return ret; 375 return ret;
369} 376}
370 377
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0825e4ed9447..31ade5802ae8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3654 u32 item_size; 3654 u32 item_size;
3655 int ret; 3655 int ret;
3656 int err = 0; 3656 int err = 0;
3657 int progress = 0;
3657 3658
3658 path = btrfs_alloc_path(); 3659 path = btrfs_alloc_path();
3659 if (!path) 3660 if (!path)
@@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3666 } 3667 }
3667 3668
3668 while (1) { 3669 while (1) {
3670 progress++;
3669 trans = btrfs_start_transaction(rc->extent_root, 0); 3671 trans = btrfs_start_transaction(rc->extent_root, 0);
3670 BUG_ON(IS_ERR(trans)); 3672 BUG_ON(IS_ERR(trans));
3671 3673restart:
3672 if (update_backref_cache(trans, &rc->backref_cache)) { 3674 if (update_backref_cache(trans, &rc->backref_cache)) {
3673 btrfs_end_transaction(trans, rc->extent_root); 3675 btrfs_end_transaction(trans, rc->extent_root);
3674 continue; 3676 continue;
@@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3781 } 3783 }
3782 } 3784 }
3783 } 3785 }
3786 if (trans && progress && err == -ENOSPC) {
3787 ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
3788 rc->block_group->flags);
3789 if (ret == 0) {
3790 err = 0;
3791 progress = 0;
3792 goto restart;
3793 }
3794 }
3784 3795
3785 btrfs_release_path(rc->extent_root, path); 3796 btrfs_release_path(rc->extent_root, path);
3786 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 3797 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a004008f7d28..d39a9895d932 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -155,7 +155,8 @@ enum {
155 Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, 155 Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
156 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 156 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
157 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 157 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
158 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, 158 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
159 Opt_enospc_debug, Opt_err,
159}; 160};
160 161
161static match_table_t tokens = { 162static match_table_t tokens = {
@@ -184,6 +185,7 @@ static match_table_t tokens = {
184 {Opt_space_cache, "space_cache"}, 185 {Opt_space_cache, "space_cache"},
185 {Opt_clear_cache, "clear_cache"}, 186 {Opt_clear_cache, "clear_cache"},
186 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, 187 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
188 {Opt_enospc_debug, "enospc_debug"},
187 {Opt_err, NULL}, 189 {Opt_err, NULL},
188}; 190};
189 191
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
358 case Opt_user_subvol_rm_allowed: 360 case Opt_user_subvol_rm_allowed:
359 btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); 361 btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
360 break; 362 break;
363 case Opt_enospc_debug:
364 btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
365 break;
361 case Opt_err: 366 case Opt_err:
362 printk(KERN_INFO "btrfs: unrecognized mount option " 367 printk(KERN_INFO "btrfs: unrecognized mount option "
363 "'%s'\n", p); 368 "'%s'\n", p);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index af7dbca15276..dd13eb81ee40 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1338 1338
1339 ret = btrfs_shrink_device(device, 0); 1339 ret = btrfs_shrink_device(device, 0);
1340 if (ret) 1340 if (ret)
1341 goto error_brelse; 1341 goto error_undo;
1342 1342
1343 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); 1343 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1344 if (ret) 1344 if (ret)
1345 goto error_brelse; 1345 goto error_undo;
1346 1346
1347 device->in_fs_metadata = 0; 1347 device->in_fs_metadata = 0;
1348 1348
@@ -1416,6 +1416,13 @@ out:
1416 mutex_unlock(&root->fs_info->volume_mutex); 1416 mutex_unlock(&root->fs_info->volume_mutex);
1417 mutex_unlock(&uuid_mutex); 1417 mutex_unlock(&uuid_mutex);
1418 return ret; 1418 return ret;
1419error_undo:
1420 if (device->writeable) {
1421 list_add(&device->dev_alloc_list,
1422 &root->fs_info->fs_devices->alloc_list);
1423 root->fs_info->fs_devices->rw_devices++;
1424 }
1425 goto error_brelse;
1419} 1426}
1420 1427
1421/* 1428/*
@@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1633 device->dev_root = root->fs_info->dev_root; 1640 device->dev_root = root->fs_info->dev_root;
1634 device->bdev = bdev; 1641 device->bdev = bdev;
1635 device->in_fs_metadata = 1; 1642 device->in_fs_metadata = 1;
1636 device->mode = 0; 1643 device->mode = FMODE_EXCL;
1637 set_blocksize(device->bdev, 4096); 1644 set_blocksize(device->bdev, 4096);
1638 1645
1639 if (seeding_dev) { 1646 if (seeding_dev) {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 0bc68de8edd7..f0aef787a102 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -60,6 +60,7 @@ int ceph_init_dentry(struct dentry *dentry)
60 } 60 }
61 di->dentry = dentry; 61 di->dentry = dentry;
62 di->lease_session = NULL; 62 di->lease_session = NULL;
63 di->parent_inode = igrab(dentry->d_parent->d_inode);
63 dentry->d_fsdata = di; 64 dentry->d_fsdata = di;
64 dentry->d_time = jiffies; 65 dentry->d_time = jiffies;
65 ceph_dentry_lru_add(dentry); 66 ceph_dentry_lru_add(dentry);
@@ -1033,7 +1034,7 @@ static void ceph_dentry_release(struct dentry *dentry)
1033 u64 snapid = CEPH_NOSNAP; 1034 u64 snapid = CEPH_NOSNAP;
1034 1035
1035 if (!IS_ROOT(dentry)) { 1036 if (!IS_ROOT(dentry)) {
1036 parent_inode = dentry->d_parent->d_inode; 1037 parent_inode = di->parent_inode;
1037 if (parent_inode) 1038 if (parent_inode)
1038 snapid = ceph_snap(parent_inode); 1039 snapid = ceph_snap(parent_inode);
1039 } 1040 }
@@ -1058,6 +1059,8 @@ static void ceph_dentry_release(struct dentry *dentry)
1058 kmem_cache_free(ceph_dentry_cachep, di); 1059 kmem_cache_free(ceph_dentry_cachep, di);
1059 dentry->d_fsdata = NULL; 1060 dentry->d_fsdata = NULL;
1060 } 1061 }
1062 if (parent_inode)
1063 iput(parent_inode);
1061} 1064}
1062 1065
1063static int ceph_snapdir_d_revalidate(struct dentry *dentry, 1066static int ceph_snapdir_d_revalidate(struct dentry *dentry,
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 39c243acd062..f40b9139e437 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
584 if (lastinode) 584 if (lastinode)
585 iput(lastinode); 585 iput(lastinode);
586 586
587 dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino); 587 list_for_each_entry(child, &realm->children, child_item) {
588 list_for_each_entry(child, &realm->children, child_item) 588 dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
589 queue_realm_cap_snaps(child); 589 realm, realm->ino, child, child->ino);
590 list_del_init(&child->dirty_item);
591 list_add(&child->dirty_item, &realm->dirty_item);
592 }
590 593
594 list_del_init(&realm->dirty_item);
591 dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); 595 dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
592} 596}
593 597
@@ -683,7 +687,9 @@ more:
683 * queue cap snaps _after_ we've built the new snap contexts, 687 * queue cap snaps _after_ we've built the new snap contexts,
684 * so that i_head_snapc can be set appropriately. 688 * so that i_head_snapc can be set appropriately.
685 */ 689 */
686 list_for_each_entry(realm, &dirty_realms, dirty_item) { 690 while (!list_empty(&dirty_realms)) {
691 realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
692 dirty_item);
687 queue_realm_cap_snaps(realm); 693 queue_realm_cap_snaps(realm);
688 } 694 }
689 695
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 20b907d76ae2..88fcaa21b801 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -207,6 +207,7 @@ struct ceph_dentry_info {
207 struct dentry *dentry; 207 struct dentry *dentry;
208 u64 time; 208 u64 time;
209 u64 offset; 209 u64 offset;
210 struct inode *parent_inode;
210}; 211};
211 212
212struct ceph_inode_xattrs_info { 213struct ceph_inode_xattrs_info {
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4a3330235d55..a9371b6578c0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
127extern const struct export_operations cifs_export_ops; 127extern const struct export_operations cifs_export_ops;
128#endif /* EXPERIMENTAL */ 128#endif /* EXPERIMENTAL */
129 129
130#define CIFS_VERSION "1.70" 130#define CIFS_VERSION "1.71"
131#endif /* _CIFSFS_H */ 131#endif /* _CIFSFS_H */
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 8d9189f64477..79f641eeda30 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
170{ 170{
171 int rc, alen, slen; 171 int rc, alen, slen;
172 const char *pct; 172 const char *pct;
173 char *endp, scope_id[13]; 173 char scope_id[13];
174 struct sockaddr_in *s4 = (struct sockaddr_in *) dst; 174 struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
175 struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; 175 struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
176 176
@@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
197 memcpy(scope_id, pct + 1, slen); 197 memcpy(scope_id, pct + 1, slen);
198 scope_id[slen] = '\0'; 198 scope_id[slen] = '\0';
199 199
200 s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); 200 rc = strict_strtoul(scope_id, 0,
201 if (endp != scope_id + slen) 201 (unsigned long *)&s6->sin6_scope_id);
202 return 0; 202 rc = (rc == 0) ? 1 : 0;
203 } 203 }
204 204
205 return rc; 205 return rc;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 1adc9625a344..16765703131b 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate:
656 656
657 if (type == LANMAN) { 657 if (type == LANMAN) {
658#ifdef CONFIG_CIFS_WEAK_PW_HASH 658#ifdef CONFIG_CIFS_WEAK_PW_HASH
659 char lnm_session_key[CIFS_SESS_KEY_SIZE]; 659 char lnm_session_key[CIFS_AUTH_RESP_SIZE];
660 660
661 pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; 661 pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
662 662
663 /* no capabilities flags in old lanman negotiation */ 663 /* no capabilities flags in old lanman negotiation */
664 664
665 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); 665 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
666 666
667 /* Calculate hash with password and copy into bcc_ptr. 667 /* Calculate hash with password and copy into bcc_ptr.
668 * Encryption Key (stored as in cryptkey) gets used if the 668 * Encryption Key (stored as in cryptkey) gets used if the
@@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate:
675 true : false, lnm_session_key); 675 true : false, lnm_session_key);
676 676
677 ses->flags |= CIFS_SES_LANMAN; 677 ses->flags |= CIFS_SES_LANMAN;
678 memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); 678 memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
679 bcc_ptr += CIFS_SESS_KEY_SIZE; 679 bcc_ptr += CIFS_AUTH_RESP_SIZE;
680 680
681 /* can not sign if LANMAN negotiated so no need 681 /* can not sign if LANMAN negotiated so no need
682 to calculate signing key? but what if server 682 to calculate signing key? but what if server
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 6fc4f319b550..534c1d46e69e 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
46{ 46{
47 struct dentry *lower_dentry; 47 struct dentry *lower_dentry;
48 struct vfsmount *lower_mnt; 48 struct vfsmount *lower_mnt;
49 struct dentry *dentry_save; 49 struct dentry *dentry_save = NULL;
50 struct vfsmount *vfsmount_save; 50 struct vfsmount *vfsmount_save = NULL;
51 int rc = 1; 51 int rc = 1;
52 52
53 if (nd->flags & LOOKUP_RCU) 53 if (nd && nd->flags & LOOKUP_RCU)
54 return -ECHILD; 54 return -ECHILD;
55 55
56 lower_dentry = ecryptfs_dentry_to_lower(dentry); 56 lower_dentry = ecryptfs_dentry_to_lower(dentry);
57 lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); 57 lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
58 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) 58 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
59 goto out; 59 goto out;
60 dentry_save = nd->path.dentry; 60 if (nd) {
61 vfsmount_save = nd->path.mnt; 61 dentry_save = nd->path.dentry;
62 nd->path.dentry = lower_dentry; 62 vfsmount_save = nd->path.mnt;
63 nd->path.mnt = lower_mnt; 63 nd->path.dentry = lower_dentry;
64 nd->path.mnt = lower_mnt;
65 }
64 rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); 66 rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
65 nd->path.dentry = dentry_save; 67 if (nd) {
66 nd->path.mnt = vfsmount_save; 68 nd->path.dentry = dentry_save;
69 nd->path.mnt = vfsmount_save;
70 }
67 if (dentry->d_inode) { 71 if (dentry->d_inode) {
68 struct inode *lower_inode = 72 struct inode *lower_inode =
69 ecryptfs_inode_to_lower(dentry->d_inode); 73 ecryptfs_inode_to_lower(dentry->d_inode);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index dbc84ed96336..e00753496e3e 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry,
632 u32 flags); 632 u32 flags);
633int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, 633int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
634 struct dentry *lower_dentry, 634 struct dentry *lower_dentry,
635 struct inode *ecryptfs_dir_inode, 635 struct inode *ecryptfs_dir_inode);
636 struct nameidata *ecryptfs_nd);
637int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, 636int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
638 size_t *decrypted_name_size, 637 size_t *decrypted_name_size,
639 struct dentry *ecryptfs_dentry, 638 struct dentry *ecryptfs_dentry,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 81e10e6a9443..7d1050e254f9 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
317 317
318const struct file_operations ecryptfs_dir_fops = { 318const struct file_operations ecryptfs_dir_fops = {
319 .readdir = ecryptfs_readdir, 319 .readdir = ecryptfs_readdir,
320 .read = generic_read_dir,
320 .unlocked_ioctl = ecryptfs_unlocked_ioctl, 321 .unlocked_ioctl = ecryptfs_unlocked_ioctl,
321#ifdef CONFIG_COMPAT 322#ifdef CONFIG_COMPAT
322 .compat_ioctl = ecryptfs_compat_ioctl, 323 .compat_ioctl = ecryptfs_compat_ioctl,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index bd33f87a1907..b592938a84bc 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
74 unsigned int flags_save; 74 unsigned int flags_save;
75 int rc; 75 int rc;
76 76
77 dentry_save = nd->path.dentry; 77 if (nd) {
78 vfsmount_save = nd->path.mnt; 78 dentry_save = nd->path.dentry;
79 flags_save = nd->flags; 79 vfsmount_save = nd->path.mnt;
80 nd->path.dentry = lower_dentry; 80 flags_save = nd->flags;
81 nd->path.mnt = lower_mnt; 81 nd->path.dentry = lower_dentry;
82 nd->flags &= ~LOOKUP_OPEN; 82 nd->path.mnt = lower_mnt;
83 nd->flags &= ~LOOKUP_OPEN;
84 }
83 rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); 85 rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
84 nd->path.dentry = dentry_save; 86 if (nd) {
85 nd->path.mnt = vfsmount_save; 87 nd->path.dentry = dentry_save;
86 nd->flags = flags_save; 88 nd->path.mnt = vfsmount_save;
89 nd->flags = flags_save;
90 }
87 return rc; 91 return rc;
88} 92}
89 93
@@ -241,8 +245,7 @@ out:
241 */ 245 */
242int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, 246int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
243 struct dentry *lower_dentry, 247 struct dentry *lower_dentry,
244 struct inode *ecryptfs_dir_inode, 248 struct inode *ecryptfs_dir_inode)
245 struct nameidata *ecryptfs_nd)
246{ 249{
247 struct dentry *lower_dir_dentry; 250 struct dentry *lower_dir_dentry;
248 struct vfsmount *lower_mnt; 251 struct vfsmount *lower_mnt;
@@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
290 goto out; 293 goto out;
291 if (special_file(lower_inode->i_mode)) 294 if (special_file(lower_inode->i_mode))
292 goto out; 295 goto out;
293 if (!ecryptfs_nd)
294 goto out;
295 /* Released in this function */ 296 /* Released in this function */
296 page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); 297 page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
297 if (!page_virt) { 298 if (!page_virt) {
@@ -349,75 +350,6 @@ out:
349} 350}
350 351
351/** 352/**
352 * ecryptfs_new_lower_dentry
353 * @name: The name of the new dentry.
354 * @lower_dir_dentry: Parent directory of the new dentry.
355 * @nd: nameidata from last lookup.
356 *
357 * Create a new dentry or get it from lower parent dir.
358 */
359static struct dentry *
360ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
361 struct nameidata *nd)
362{
363 struct dentry *new_dentry;
364 struct dentry *tmp;
365 struct inode *lower_dir_inode;
366
367 lower_dir_inode = lower_dir_dentry->d_inode;
368
369 tmp = d_alloc(lower_dir_dentry, name);
370 if (!tmp)
371 return ERR_PTR(-ENOMEM);
372
373 mutex_lock(&lower_dir_inode->i_mutex);
374 new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd);
375 mutex_unlock(&lower_dir_inode->i_mutex);
376
377 if (!new_dentry)
378 new_dentry = tmp;
379 else
380 dput(tmp);
381
382 return new_dentry;
383}
384
385
386/**
387 * ecryptfs_lookup_one_lower
388 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
389 * @lower_dir_dentry: lower parent directory
390 * @name: lower file name
391 *
392 * Get the lower dentry from vfs. If lower dentry does not exist yet,
393 * create it.
394 */
395static struct dentry *
396ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
397 struct dentry *lower_dir_dentry, struct qstr *name)
398{
399 struct nameidata nd;
400 struct vfsmount *lower_mnt;
401 int err;
402
403 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
404 ecryptfs_dentry->d_parent));
405 err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
406 mntput(lower_mnt);
407
408 if (!err) {
409 /* we dont need the mount */
410 mntput(nd.path.mnt);
411 return nd.path.dentry;
412 }
413 if (err != -ENOENT)
414 return ERR_PTR(err);
415
416 /* create a new lower dentry */
417 return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd);
418}
419
420/**
421 * ecryptfs_lookup 353 * ecryptfs_lookup
422 * @ecryptfs_dir_inode: The eCryptfs directory inode 354 * @ecryptfs_dir_inode: The eCryptfs directory inode
423 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up 355 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
@@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
434 size_t encrypted_and_encoded_name_size; 366 size_t encrypted_and_encoded_name_size;
435 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; 367 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
436 struct dentry *lower_dir_dentry, *lower_dentry; 368 struct dentry *lower_dir_dentry, *lower_dentry;
437 struct qstr lower_name;
438 int rc = 0; 369 int rc = 0;
439 370
440 if ((ecryptfs_dentry->d_name.len == 1 371 if ((ecryptfs_dentry->d_name.len == 1
@@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
444 goto out_d_drop; 375 goto out_d_drop;
445 } 376 }
446 lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); 377 lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
447 lower_name.name = ecryptfs_dentry->d_name.name; 378 mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
448 lower_name.len = ecryptfs_dentry->d_name.len; 379 lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
449 lower_name.hash = ecryptfs_dentry->d_name.hash; 380 lower_dir_dentry,
450 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { 381 ecryptfs_dentry->d_name.len);
451 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, 382 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
452 lower_dir_dentry->d_inode, &lower_name);
453 if (rc < 0)
454 goto out_d_drop;
455 }
456 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
457 lower_dir_dentry, &lower_name);
458 if (IS_ERR(lower_dentry)) { 383 if (IS_ERR(lower_dentry)) {
459 rc = PTR_ERR(lower_dentry); 384 rc = PTR_ERR(lower_dentry);
460 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " 385 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
461 "[%d] on lower_dentry = [%s]\n", __func__, rc, 386 "[%d] on lower_dentry = [%s]\n", __func__, rc,
462 encrypted_and_encoded_name); 387 encrypted_and_encoded_name);
463 goto out_d_drop; 388 goto out_d_drop;
@@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
479 "filename; rc = [%d]\n", __func__, rc); 404 "filename; rc = [%d]\n", __func__, rc);
480 goto out_d_drop; 405 goto out_d_drop;
481 } 406 }
482 lower_name.name = encrypted_and_encoded_name; 407 mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
483 lower_name.len = encrypted_and_encoded_name_size; 408 lower_dentry = lookup_one_len(encrypted_and_encoded_name,
484 lower_name.hash = full_name_hash(lower_name.name, lower_name.len); 409 lower_dir_dentry,
485 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { 410 encrypted_and_encoded_name_size);
486 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, 411 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
487 lower_dir_dentry->d_inode, &lower_name);
488 if (rc < 0)
489 goto out_d_drop;
490 }
491 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
492 lower_dir_dentry, &lower_name);
493 if (IS_ERR(lower_dentry)) { 412 if (IS_ERR(lower_dentry)) {
494 rc = PTR_ERR(lower_dentry); 413 rc = PTR_ERR(lower_dentry);
495 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " 414 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
496 "[%d] on lower_dentry = [%s]\n", __func__, rc, 415 "[%d] on lower_dentry = [%s]\n", __func__, rc,
497 encrypted_and_encoded_name); 416 encrypted_and_encoded_name);
498 goto out_d_drop; 417 goto out_d_drop;
499 } 418 }
500lookup_and_interpose: 419lookup_and_interpose:
501 rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, 420 rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry,
502 ecryptfs_dir_inode, 421 ecryptfs_dir_inode);
503 ecryptfs_nd);
504 goto out; 422 goto out;
505out_d_drop: 423out_d_drop:
506 d_drop(ecryptfs_dentry); 424 d_drop(ecryptfs_dentry);
@@ -1092,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1092 rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), 1010 rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
1093 ecryptfs_dentry_to_lower(dentry), &lower_stat); 1011 ecryptfs_dentry_to_lower(dentry), &lower_stat);
1094 if (!rc) { 1012 if (!rc) {
1013 fsstack_copy_attr_all(dentry->d_inode,
1014 ecryptfs_inode_to_lower(dentry->d_inode));
1095 generic_fillattr(dentry->d_inode, stat); 1015 generic_fillattr(dentry->d_inode, stat);
1096 stat->blocks = lower_stat.blocks; 1016 stat->blocks = lower_stat.blocks;
1097 } 1017 }
diff --git a/fs/eventfd.c b/fs/eventfd.c
index e0194b3e14d6..d9a591773919 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get);
99 * @ctx: [in] Pointer to eventfd context. 99 * @ctx: [in] Pointer to eventfd context.
100 * 100 *
101 * The eventfd context reference must have been previously acquired either 101 * The eventfd context reference must have been previously acquired either
102 * with eventfd_ctx_get() or eventfd_ctx_fdget()). 102 * with eventfd_ctx_get() or eventfd_ctx_fdget().
103 */ 103 */
104void eventfd_ctx_put(struct eventfd_ctx *ctx) 104void eventfd_ctx_put(struct eventfd_ctx *ctx)
105{ 105{
@@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
146 * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. 146 * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
147 * @ctx: [in] Pointer to eventfd context. 147 * @ctx: [in] Pointer to eventfd context.
148 * @wait: [in] Wait queue to be removed. 148 * @wait: [in] Wait queue to be removed.
149 * @cnt: [out] Pointer to the 64bit conter value. 149 * @cnt: [out] Pointer to the 64-bit counter value.
150 * 150 *
151 * Returns zero if successful, or the following error codes: 151 * Returns %0 if successful, or the following error codes:
152 * 152 *
153 * -EAGAIN : The operation would have blocked. 153 * -EAGAIN : The operation would have blocked.
154 * 154 *
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
175 * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. 175 * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
176 * @ctx: [in] Pointer to eventfd context. 176 * @ctx: [in] Pointer to eventfd context.
177 * @no_wait: [in] Different from zero if the operation should not block. 177 * @no_wait: [in] Different from zero if the operation should not block.
178 * @cnt: [out] Pointer to the 64bit conter value. 178 * @cnt: [out] Pointer to the 64-bit counter value.
179 * 179 *
180 * Returns zero if successful, or the following error codes: 180 * Returns %0 if successful, or the following error codes:
181 * 181 *
182 * -EAGAIN : The operation would have blocked but @no_wait was nonzero. 182 * -EAGAIN : The operation would have blocked but @no_wait was non-zero.
183 * -ERESTARTSYS : A signal interrupted the wait operation. 183 * -ERESTARTSYS : A signal interrupted the wait operation.
184 * 184 *
185 * If @no_wait is zero, the function might sleep until the eventfd internal 185 * If @no_wait is zero, the function might sleep until the eventfd internal
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 267d0ada4541..4a09af9e9a63 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -63,6 +63,13 @@
63 * cleanup path and it is also acquired by eventpoll_release_file() 63 * cleanup path and it is also acquired by eventpoll_release_file()
64 * if a file has been pushed inside an epoll set and it is then 64 * if a file has been pushed inside an epoll set and it is then
65 * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). 65 * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
66 * It is also acquired when inserting an epoll fd onto another epoll
67 * fd. We do this so that we walk the epoll tree and ensure that this
68 * insertion does not create a cycle of epoll file descriptors, which
69 * could lead to deadlock. We need a global mutex to prevent two
70 * simultaneous inserts (A into B and B into A) from racing and
71 * constructing a cycle without either insert observing that it is
72 * going to.
66 * It is possible to drop the "ep->mtx" and to use the global 73 * It is possible to drop the "ep->mtx" and to use the global
67 * mutex "epmutex" (together with "ep->lock") to have it working, 74 * mutex "epmutex" (together with "ep->lock") to have it working,
68 * but having "ep->mtx" will make the interface more scalable. 75 * but having "ep->mtx" will make the interface more scalable.
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly;
224 */ 231 */
225static DEFINE_MUTEX(epmutex); 232static DEFINE_MUTEX(epmutex);
226 233
234/* Used to check for epoll file descriptor inclusion loops */
235static struct nested_calls poll_loop_ncalls;
236
227/* Used for safe wake up implementation */ 237/* Used for safe wake up implementation */
228static struct nested_calls poll_safewake_ncalls; 238static struct nested_calls poll_safewake_ncalls;
229 239
@@ -1198,6 +1208,62 @@ retry:
1198 return res; 1208 return res;
1199} 1209}
1200 1210
1211/**
1212 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
1213 * API, to verify that adding an epoll file inside another
1214 * epoll structure, does not violate the constraints, in
1215 * terms of closed loops, or too deep chains (which can
1216 * result in excessive stack usage).
1217 *
1218 * @priv: Pointer to the epoll file to be currently checked.
1219 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
1220 * data structure pointer.
1221 * @call_nests: Current dept of the @ep_call_nested() call stack.
1222 *
1223 * Returns: Returns zero if adding the epoll @file inside current epoll
1224 * structure @ep does not violate the constraints, or -1 otherwise.
1225 */
1226static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1227{
1228 int error = 0;
1229 struct file *file = priv;
1230 struct eventpoll *ep = file->private_data;
1231 struct rb_node *rbp;
1232 struct epitem *epi;
1233
1234 mutex_lock(&ep->mtx);
1235 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1236 epi = rb_entry(rbp, struct epitem, rbn);
1237 if (unlikely(is_file_epoll(epi->ffd.file))) {
1238 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1239 ep_loop_check_proc, epi->ffd.file,
1240 epi->ffd.file->private_data, current);
1241 if (error != 0)
1242 break;
1243 }
1244 }
1245 mutex_unlock(&ep->mtx);
1246
1247 return error;
1248}
1249
1250/**
1251 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
1252 * another epoll file (represented by @ep) does not create
1253 * closed loops or too deep chains.
1254 *
1255 * @ep: Pointer to the epoll private data structure.
1256 * @file: Pointer to the epoll file to be checked.
1257 *
1258 * Returns: Returns zero if adding the epoll @file inside current epoll
1259 * structure @ep does not violate the constraints, or -1 otherwise.
1260 */
1261static int ep_loop_check(struct eventpoll *ep, struct file *file)
1262{
1263 return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1264 ep_loop_check_proc, file, ep, current);
1265}
1266
1201/* 1267/*
1202 * Open an eventpoll file descriptor. 1268 * Open an eventpoll file descriptor.
1203 */ 1269 */
@@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1246 struct epoll_event __user *, event) 1312 struct epoll_event __user *, event)
1247{ 1313{
1248 int error; 1314 int error;
1315 int did_lock_epmutex = 0;
1249 struct file *file, *tfile; 1316 struct file *file, *tfile;
1250 struct eventpoll *ep; 1317 struct eventpoll *ep;
1251 struct epitem *epi; 1318 struct epitem *epi;
@@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1287 */ 1354 */
1288 ep = file->private_data; 1355 ep = file->private_data;
1289 1356
1357 /*
1358 * When we insert an epoll file descriptor, inside another epoll file
1359 * descriptor, there is the change of creating closed loops, which are
1360 * better be handled here, than in more critical paths.
1361 *
1362 * We hold epmutex across the loop check and the insert in this case, in
1363 * order to prevent two separate inserts from racing and each doing the
1364 * insert "at the same time" such that ep_loop_check passes on both
1365 * before either one does the insert, thereby creating a cycle.
1366 */
1367 if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
1368 mutex_lock(&epmutex);
1369 did_lock_epmutex = 1;
1370 error = -ELOOP;
1371 if (ep_loop_check(ep, tfile) != 0)
1372 goto error_tgt_fput;
1373 }
1374
1375
1290 mutex_lock(&ep->mtx); 1376 mutex_lock(&ep->mtx);
1291 1377
1292 /* 1378 /*
@@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1322 mutex_unlock(&ep->mtx); 1408 mutex_unlock(&ep->mtx);
1323 1409
1324error_tgt_fput: 1410error_tgt_fput:
1411 if (unlikely(did_lock_epmutex))
1412 mutex_unlock(&epmutex);
1413
1325 fput(tfile); 1414 fput(tfile);
1326error_fput: 1415error_fput:
1327 fput(file); 1416 fput(file);
@@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void)
1441 EP_ITEM_COST; 1530 EP_ITEM_COST;
1442 BUG_ON(max_user_watches < 0); 1531 BUG_ON(max_user_watches < 0);
1443 1532
1533 /*
1534 * Initialize the structure used to perform epoll file descriptor
1535 * inclusion loops checks.
1536 */
1537 ep_nested_calls_init(&poll_loop_ncalls);
1538
1444 /* Initialize the structure used to perform safe poll wait head wake ups */ 1539 /* Initialize the structure used to perform safe poll wait head wake ups */
1445 ep_nested_calls_init(&poll_safewake_ncalls); 1540 ep_nested_calls_init(&poll_safewake_ncalls);
1446 1541
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 264e95d02830..4d70db110cfc 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page); 272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
273 if (!new_de) 273 if (!new_de)
274 goto out_dir; 274 goto out_dir;
275 inode_inc_link_count(old_inode);
276 err = exofs_set_link(new_dir, new_de, new_page, old_inode); 275 err = exofs_set_link(new_dir, new_de, new_page, old_inode);
277 new_inode->i_ctime = CURRENT_TIME; 276 new_inode->i_ctime = CURRENT_TIME;
278 if (dir_de) 277 if (dir_de)
@@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
286 if (new_dir->i_nlink >= EXOFS_LINK_MAX) 285 if (new_dir->i_nlink >= EXOFS_LINK_MAX)
287 goto out_dir; 286 goto out_dir;
288 } 287 }
289 inode_inc_link_count(old_inode);
290 err = exofs_add_link(new_dentry, old_inode); 288 err = exofs_add_link(new_dentry, old_inode);
291 if (err) { 289 if (err)
292 inode_dec_link_count(old_inode);
293 goto out_dir; 290 goto out_dir;
294 }
295 if (dir_de) 291 if (dir_de)
296 inode_inc_link_count(new_dir); 292 inode_inc_link_count(new_dir);
297 } 293 }
@@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
299 old_inode->i_ctime = CURRENT_TIME; 295 old_inode->i_ctime = CURRENT_TIME;
300 296
301 exofs_delete_entry(old_de, old_page); 297 exofs_delete_entry(old_de, old_page);
302 inode_dec_link_count(old_inode); 298 mark_inode_dirty(old_inode);
303 299
304 if (dir_de) { 300 if (dir_de) {
305 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); 301 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 2e1d8341d827..adb91855ccd0 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); 344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
345 if (!new_de) 345 if (!new_de)
346 goto out_dir; 346 goto out_dir;
347 inode_inc_link_count(old_inode);
348 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); 347 ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
349 new_inode->i_ctime = CURRENT_TIME_SEC; 348 new_inode->i_ctime = CURRENT_TIME_SEC;
350 if (dir_de) 349 if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
356 if (new_dir->i_nlink >= EXT2_LINK_MAX) 355 if (new_dir->i_nlink >= EXT2_LINK_MAX)
357 goto out_dir; 356 goto out_dir;
358 } 357 }
359 inode_inc_link_count(old_inode);
360 err = ext2_add_link(new_dentry, old_inode); 358 err = ext2_add_link(new_dentry, old_inode);
361 if (err) { 359 if (err)
362 inode_dec_link_count(old_inode);
363 goto out_dir; 360 goto out_dir;
364 }
365 if (dir_de) 361 if (dir_de)
366 inode_inc_link_count(new_dir); 362 inode_inc_link_count(new_dir);
367 } 363 }
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
369 /* 365 /*
370 * Like most other Unix systems, set the ctime for inodes on a 366 * Like most other Unix systems, set the ctime for inodes on a
371 * rename. 367 * rename.
372 * inode_dec_link_count() will mark the inode dirty.
373 */ 368 */
374 old_inode->i_ctime = CURRENT_TIME_SEC; 369 old_inode->i_ctime = CURRENT_TIME_SEC;
370 mark_inode_dirty(old_inode);
375 371
376 ext2_delete_entry (old_de, old_page); 372 ext2_delete_entry (old_de, old_page);
377 inode_dec_link_count(old_inode);
378 373
379 if (dir_de) { 374 if (dir_de) {
380 if (old_dir != new_dir) 375 if (old_dir != new_dir)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index bfed8447ed80..83543b5ff941 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
1283 if (err) 1283 if (err)
1284 return err; 1284 return err;
1285 1285
1286 if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc) 1286 if (attr->ia_valid & ATTR_OPEN) {
1287 return 0; 1287 if (fc->atomic_o_trunc)
1288 return 0;
1289 file = NULL;
1290 }
1288 1291
1289 if (attr->ia_valid & ATTR_SIZE) 1292 if (attr->ia_valid & ATTR_SIZE)
1290 is_truncate = true; 1293 is_truncate = true;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 95da1bc1c826..9e0832dbb1e3 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
86 return ff; 86 return ff;
87} 87}
88 88
89static void fuse_release_async(struct work_struct *work)
90{
91 struct fuse_req *req;
92 struct fuse_conn *fc;
93 struct path path;
94
95 req = container_of(work, struct fuse_req, misc.release.work);
96 path = req->misc.release.path;
97 fc = get_fuse_conn(path.dentry->d_inode);
98
99 fuse_put_request(fc, req);
100 path_put(&path);
101}
102
89static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 103static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
90{ 104{
91 path_put(&req->misc.release.path); 105 if (fc->destroy_req) {
106 /*
107 * If this is a fuseblk mount, then it's possible that
108 * releasing the path will result in releasing the
109 * super block and sending the DESTROY request. If
110 * the server is single threaded, this would hang.
111 * For this reason do the path_put() in a separate
112 * thread.
113 */
114 atomic_inc(&req->count);
115 INIT_WORK(&req->misc.release.work, fuse_release_async);
116 schedule_work(&req->misc.release.work);
117 } else {
118 path_put(&req->misc.release.path);
119 }
92} 120}
93 121
94static void fuse_file_put(struct fuse_file *ff) 122static void fuse_file_put(struct fuse_file *ff, bool sync)
95{ 123{
96 if (atomic_dec_and_test(&ff->count)) { 124 if (atomic_dec_and_test(&ff->count)) {
97 struct fuse_req *req = ff->reserved_req; 125 struct fuse_req *req = ff->reserved_req;
98 126
99 req->end = fuse_release_end; 127 if (sync) {
100 fuse_request_send_background(ff->fc, req); 128 fuse_request_send(ff->fc, req);
129 path_put(&req->misc.release.path);
130 fuse_put_request(ff->fc, req);
131 } else {
132 req->end = fuse_release_end;
133 fuse_request_send_background(ff->fc, req);
134 }
101 kfree(ff); 135 kfree(ff);
102 } 136 }
103} 137}
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
219 * Normally this will send the RELEASE request, however if 253 * Normally this will send the RELEASE request, however if
220 * some asynchronous READ or WRITE requests are outstanding, 254 * some asynchronous READ or WRITE requests are outstanding,
221 * the sending will be delayed. 255 * the sending will be delayed.
256 *
257 * Make the release synchronous if this is a fuseblk mount,
258 * synchronous RELEASE is allowed (and desirable) in this case
259 * because the server can be trusted not to screw up.
222 */ 260 */
223 fuse_file_put(ff); 261 fuse_file_put(ff, ff->fc->destroy_req != NULL);
224} 262}
225 263
226static int fuse_open(struct inode *inode, struct file *file) 264static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
558 page_cache_release(page); 596 page_cache_release(page);
559 } 597 }
560 if (req->ff) 598 if (req->ff)
561 fuse_file_put(req->ff); 599 fuse_file_put(req->ff, false);
562} 600}
563 601
564static void fuse_send_readpages(struct fuse_req *req, struct file *file) 602static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1137static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) 1175static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
1138{ 1176{
1139 __free_page(req->pages[0]); 1177 __free_page(req->pages[0]);
1140 fuse_file_put(req->ff); 1178 fuse_file_put(req->ff, false);
1141} 1179}
1142 1180
1143static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) 1181static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ae5744a2f9e9..d4286947bc2c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,7 @@
21#include <linux/rwsem.h> 21#include <linux/rwsem.h>
22#include <linux/rbtree.h> 22#include <linux/rbtree.h>
23#include <linux/poll.h> 23#include <linux/poll.h>
24#include <linux/workqueue.h>
24 25
25/** Max number of pages that can be used in a single read request */ 26/** Max number of pages that can be used in a single read request */
26#define FUSE_MAX_PAGES_PER_REQ 32 27#define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,7 +263,10 @@ struct fuse_req {
262 /** Data for asynchronous requests */ 263 /** Data for asynchronous requests */
263 union { 264 union {
264 struct { 265 struct {
265 struct fuse_release_in in; 266 union {
267 struct fuse_release_in in;
268 struct work_struct work;
269 };
266 struct path path; 270 struct path path;
267 } release; 271 } release;
268 struct fuse_init_in init_in; 272 struct fuse_init_in init_in;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 85ba027d1c4d..72c31a315d96 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo)
59 struct address_space *mapping = (struct address_space *)(gl + 1); 59 struct address_space *mapping = (struct address_space *)(gl + 1);
60 60
61 gfs2_init_glock_once(gl); 61 gfs2_init_glock_once(gl);
62 memset(mapping, 0, sizeof(*mapping)); 62 address_space_init_once(mapping);
63 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
64 spin_lock_init(&mapping->tree_lock);
65 spin_lock_init(&mapping->i_mmap_lock);
66 INIT_LIST_HEAD(&mapping->private_list);
67 spin_lock_init(&mapping->private_lock);
68 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
69 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
70} 63}
71 64
72/** 65/**
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index afa66aaa2237..b4d70b13be92 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
238} 238}
239 239
240/* 240/*
241 * hfs_unlink() 241 * hfs_remove()
242 * 242 *
243 * This is the unlink() entry in the inode_operations structure for 243 * This serves as both unlink() and rmdir() in the inode_operations
244 * regular HFS directories. The purpose is to delete an existing 244 * structure for regular HFS directories. The purpose is to delete
245 * file, given the inode for the parent directory and the name 245 * an existing child, given the inode for the parent directory and
246 * (and its length) of the existing file. 246 * the name (and its length) of the existing directory.
247 */
248static int hfs_unlink(struct inode *dir, struct dentry *dentry)
249{
250 struct inode *inode;
251 int res;
252
253 inode = dentry->d_inode;
254 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
255 if (res)
256 return res;
257
258 drop_nlink(inode);
259 hfs_delete_inode(inode);
260 inode->i_ctime = CURRENT_TIME_SEC;
261 mark_inode_dirty(inode);
262
263 return res;
264}
265
266/*
267 * hfs_rmdir()
268 * 247 *
269 * This is the rmdir() entry in the inode_operations structure for 248 * HFS does not have hardlinks, so both rmdir and unlink set the
270 * regular HFS directories. The purpose is to delete an existing 249 * link count to 0. The only difference is the emptiness check.
271 * directory, given the inode for the parent directory and the name
272 * (and its length) of the existing directory.
273 */ 250 */
274static int hfs_rmdir(struct inode *dir, struct dentry *dentry) 251static int hfs_remove(struct inode *dir, struct dentry *dentry)
275{ 252{
276 struct inode *inode; 253 struct inode *inode = dentry->d_inode;
277 int res; 254 int res;
278 255
279 inode = dentry->d_inode; 256 if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
280 if (inode->i_size != 2)
281 return -ENOTEMPTY; 257 return -ENOTEMPTY;
282 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 258 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
283 if (res) 259 if (res)
@@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
307 283
308 /* Unlink destination if it already exists */ 284 /* Unlink destination if it already exists */
309 if (new_dentry->d_inode) { 285 if (new_dentry->d_inode) {
310 res = hfs_unlink(new_dir, new_dentry); 286 res = hfs_remove(new_dir, new_dentry);
311 if (res) 287 if (res)
312 return res; 288 return res;
313 } 289 }
@@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = {
332const struct inode_operations hfs_dir_inode_operations = { 308const struct inode_operations hfs_dir_inode_operations = {
333 .create = hfs_create, 309 .create = hfs_create,
334 .lookup = hfs_lookup, 310 .lookup = hfs_lookup,
335 .unlink = hfs_unlink, 311 .unlink = hfs_remove,
336 .mkdir = hfs_mkdir, 312 .mkdir = hfs_mkdir,
337 .rmdir = hfs_rmdir, 313 .rmdir = hfs_remove,
338 .rename = hfs_rename, 314 .rename = hfs_rename,
339 .setattr = hfs_inode_setattr, 315 .setattr = hfs_inode_setattr,
340}; 316};
diff --git a/fs/inode.c b/fs/inode.c
index da85e56378f3..0647d80accf6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode)
295 call_rcu(&inode->i_rcu, i_callback); 295 call_rcu(&inode->i_rcu, i_callback);
296} 296}
297 297
298void address_space_init_once(struct address_space *mapping)
299{
300 memset(mapping, 0, sizeof(*mapping));
301 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
302 spin_lock_init(&mapping->tree_lock);
303 spin_lock_init(&mapping->i_mmap_lock);
304 INIT_LIST_HEAD(&mapping->private_list);
305 spin_lock_init(&mapping->private_lock);
306 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
307 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
308 mutex_init(&mapping->unmap_mutex);
309}
310EXPORT_SYMBOL(address_space_init_once);
311
298/* 312/*
299 * These are initializations that only need to be done 313 * These are initializations that only need to be done
300 * once, because the fields are idempotent across use 314 * once, because the fields are idempotent across use
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode)
308 INIT_LIST_HEAD(&inode->i_devices); 322 INIT_LIST_HEAD(&inode->i_devices);
309 INIT_LIST_HEAD(&inode->i_wb_list); 323 INIT_LIST_HEAD(&inode->i_wb_list);
310 INIT_LIST_HEAD(&inode->i_lru); 324 INIT_LIST_HEAD(&inode->i_lru);
311 INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); 325 address_space_init_once(&inode->i_data);
312 spin_lock_init(&inode->i_data.tree_lock);
313 spin_lock_init(&inode->i_data.i_mmap_lock);
314 INIT_LIST_HEAD(&inode->i_data.private_list);
315 spin_lock_init(&inode->i_data.private_lock);
316 INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
317 INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
318 i_size_ordered_init(inode); 326 i_size_ordered_init(inode);
319#ifdef CONFIG_FSNOTIFY 327#ifdef CONFIG_FSNOTIFY
320 INIT_HLIST_HEAD(&inode->i_fsnotify_marks); 328 INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
@@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb)
540/** 548/**
541 * invalidate_inodes - attempt to free all inodes on a superblock 549 * invalidate_inodes - attempt to free all inodes on a superblock
542 * @sb: superblock to operate on 550 * @sb: superblock to operate on
551 * @kill_dirty: flag to guide handling of dirty inodes
543 * 552 *
544 * Attempts to free all inodes for a given superblock. If there were any 553 * Attempts to free all inodes for a given superblock. If there were any
545 * busy inodes return a non-zero value, else zero. 554 * busy inodes return a non-zero value, else zero.
555 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
556 * them as busy.
546 */ 557 */
547int invalidate_inodes(struct super_block *sb) 558int invalidate_inodes(struct super_block *sb, bool kill_dirty)
548{ 559{
549 int busy = 0; 560 int busy = 0;
550 struct inode *inode, *next; 561 struct inode *inode, *next;
@@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb)
556 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 567 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
557 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) 568 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
558 continue; 569 continue;
570 if (inode->i_state & I_DIRTY && !kill_dirty) {
571 busy = 1;
572 continue;
573 }
559 if (atomic_read(&inode->i_count)) { 574 if (atomic_read(&inode->i_count)) {
560 busy = 1; 575 busy = 1;
561 continue; 576 continue;
diff --git a/fs/internal.h b/fs/internal.h
index 0663568b1247..9b976b57d7fe 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *);
112 */ 112 */
113extern int get_nr_dirty_inodes(void); 113extern int get_nr_dirty_inodes(void);
114extern void evict_inodes(struct super_block *); 114extern void evict_inodes(struct super_block *);
115extern int invalidate_inodes(struct super_block *); 115extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index ce7337ddfdbf..6e6777f1b4b2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
213 new_de = minix_find_entry(new_dentry, &new_page); 213 new_de = minix_find_entry(new_dentry, &new_page);
214 if (!new_de) 214 if (!new_de)
215 goto out_dir; 215 goto out_dir;
216 inode_inc_link_count(old_inode);
217 minix_set_link(new_de, new_page, old_inode); 216 minix_set_link(new_de, new_page, old_inode);
218 new_inode->i_ctime = CURRENT_TIME_SEC; 217 new_inode->i_ctime = CURRENT_TIME_SEC;
219 if (dir_de) 218 if (dir_de)
@@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
225 if (new_dir->i_nlink >= info->s_link_max) 224 if (new_dir->i_nlink >= info->s_link_max)
226 goto out_dir; 225 goto out_dir;
227 } 226 }
228 inode_inc_link_count(old_inode);
229 err = minix_add_link(new_dentry, old_inode); 227 err = minix_add_link(new_dentry, old_inode);
230 if (err) { 228 if (err)
231 inode_dec_link_count(old_inode);
232 goto out_dir; 229 goto out_dir;
233 }
234 if (dir_de) 230 if (dir_de)
235 inode_inc_link_count(new_dir); 231 inode_inc_link_count(new_dir);
236 } 232 }
237 233
238 minix_delete_entry(old_de, old_page); 234 minix_delete_entry(old_de, old_page);
239 inode_dec_link_count(old_inode); 235 mark_inode_dirty(old_inode);
240 236
241 if (dir_de) { 237 if (dir_de) {
242 minix_set_link(dir_de, dir_page, new_dir); 238 minix_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b0b95371696..d1edf26025dc 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
1244 */ 1244 */
1245 br_write_lock(vfsmount_lock); 1245 br_write_lock(vfsmount_lock);
1246 if (mnt_get_count(mnt) != 2) { 1246 if (mnt_get_count(mnt) != 2) {
1247 br_write_lock(vfsmount_lock); 1247 br_write_unlock(vfsmount_lock);
1248 return -EBUSY; 1248 return -EBUSY;
1249 } 1249 }
1250 br_write_unlock(vfsmount_lock); 1250 br_write_unlock(vfsmount_lock);
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 388e9e8f5286..85f7baa15f5d 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -35,11 +35,6 @@
35#include "btnode.h" 35#include "btnode.h"
36 36
37 37
38void nilfs_btnode_cache_init_once(struct address_space *btnc)
39{
40 nilfs_mapping_init_once(btnc);
41}
42
43static const struct address_space_operations def_btnode_aops = { 38static const struct address_space_operations def_btnode_aops = {
44 .sync_page = block_sync_page, 39 .sync_page = block_sync_page,
45}; 40};
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 79037494f1e0..1b8ebd888c28 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
37 struct buffer_head *newbh; 37 struct buffer_head *newbh;
38}; 38};
39 39
40void nilfs_btnode_cache_init_once(struct address_space *);
41void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); 40void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
42void nilfs_btnode_cache_clear(struct address_space *); 41void nilfs_btnode_cache_clear(struct address_space *);
43struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, 42struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 6a0e2a189f60..a0babd2bff6a 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
454 struct backing_dev_info *bdi = inode->i_sb->s_bdi; 454 struct backing_dev_info *bdi = inode->i_sb->s_bdi;
455 455
456 INIT_LIST_HEAD(&shadow->frozen_buffers); 456 INIT_LIST_HEAD(&shadow->frozen_buffers);
457 nilfs_mapping_init_once(&shadow->frozen_data); 457 address_space_init_once(&shadow->frozen_data);
458 nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); 458 nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
459 nilfs_mapping_init_once(&shadow->frozen_btnodes); 459 address_space_init_once(&shadow->frozen_btnodes);
460 nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); 460 nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
461 mi->mi_shadow = shadow; 461 mi->mi_shadow = shadow;
462 return 0; 462 return 0;
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 98034271cd02..161791d26458 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); 397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
398 if (!new_de) 398 if (!new_de)
399 goto out_dir; 399 goto out_dir;
400 inc_nlink(old_inode);
401 nilfs_set_link(new_dir, new_de, new_page, old_inode); 400 nilfs_set_link(new_dir, new_de, new_page, old_inode);
402 nilfs_mark_inode_dirty(new_dir); 401 nilfs_mark_inode_dirty(new_dir);
403 new_inode->i_ctime = CURRENT_TIME; 402 new_inode->i_ctime = CURRENT_TIME;
@@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
411 if (new_dir->i_nlink >= NILFS_LINK_MAX) 410 if (new_dir->i_nlink >= NILFS_LINK_MAX)
412 goto out_dir; 411 goto out_dir;
413 } 412 }
414 inc_nlink(old_inode);
415 err = nilfs_add_link(new_dentry, old_inode); 413 err = nilfs_add_link(new_dentry, old_inode);
416 if (err) { 414 if (err)
417 drop_nlink(old_inode);
418 nilfs_mark_inode_dirty(old_inode);
419 goto out_dir; 415 goto out_dir;
420 }
421 if (dir_de) { 416 if (dir_de) {
422 inc_nlink(new_dir); 417 inc_nlink(new_dir);
423 nilfs_mark_inode_dirty(new_dir); 418 nilfs_mark_inode_dirty(new_dir);
@@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
431 old_inode->i_ctime = CURRENT_TIME; 426 old_inode->i_ctime = CURRENT_TIME;
432 427
433 nilfs_delete_entry(old_de, old_page); 428 nilfs_delete_entry(old_de, old_page);
434 drop_nlink(old_inode);
435 429
436 if (dir_de) { 430 if (dir_de) {
437 nilfs_set_link(old_inode, dir_de, dir_page, new_dir); 431 nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0c432416cfef..a585b35fd6bc 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
492 return nc; 492 return nc;
493} 493}
494 494
495void nilfs_mapping_init_once(struct address_space *mapping)
496{
497 memset(mapping, 0, sizeof(*mapping));
498 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
499 spin_lock_init(&mapping->tree_lock);
500 INIT_LIST_HEAD(&mapping->private_list);
501 spin_lock_init(&mapping->private_lock);
502
503 spin_lock_init(&mapping->i_mmap_lock);
504 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
505 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
506}
507
508void nilfs_mapping_init(struct address_space *mapping, 495void nilfs_mapping_init(struct address_space *mapping,
509 struct backing_dev_info *bdi, 496 struct backing_dev_info *bdi,
510 const struct address_space_operations *aops) 497 const struct address_space_operations *aops)
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 622df27cd891..2a00953ebd5f 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *);
61int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); 61int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
62void nilfs_copy_back_pages(struct address_space *, struct address_space *); 62void nilfs_copy_back_pages(struct address_space *, struct address_space *);
63void nilfs_clear_dirty_pages(struct address_space *); 63void nilfs_clear_dirty_pages(struct address_space *);
64void nilfs_mapping_init_once(struct address_space *mapping);
65void nilfs_mapping_init(struct address_space *mapping, 64void nilfs_mapping_init(struct address_space *mapping,
66 struct backing_dev_info *bdi, 65 struct backing_dev_info *bdi,
67 const struct address_space_operations *aops); 66 const struct address_space_operations *aops);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 55ebae5c7f39..2de9f636792a 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
430 nilfs_segctor_map_segsum_entry( 430 nilfs_segctor_map_segsum_entry(
431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); 431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
432 432
433 if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) 433 if (NILFS_I(inode)->i_root &&
434 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
434 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 435 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
435 /* skip finfo */ 436 /* skip finfo */
436} 437}
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 58fd707174e1..1673b3d99842 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj)
1279#ifdef CONFIG_NILFS_XATTR 1279#ifdef CONFIG_NILFS_XATTR
1280 init_rwsem(&ii->xattr_sem); 1280 init_rwsem(&ii->xattr_sem);
1281#endif 1281#endif
1282 nilfs_btnode_cache_init_once(&ii->i_btnode_cache); 1282 address_space_init_once(&ii->i_btnode_cache);
1283 ii->i_bmap = &ii->i_bmap_data; 1283 ii->i_bmap = &ii->i_bmap_data;
1284 inode_init_once(&ii->vfs_inode); 1284 inode_init_once(&ii->vfs_inode);
1285} 1285}
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 43e56b97f9c0..6180da1e37e6 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb)
405 ocfs2_quota_trans_credits(sb); 405 ocfs2_quota_trans_credits(sb);
406} 406}
407 407
408/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + 408/* data block for new dir/symlink, allocation of directory block, dx_root
409 * bitmap block for the new bit) dx_root update for free list */ 409 * update for free list */
410#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1) 410#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
411 411
412static inline int ocfs2_add_dir_index_credits(struct super_block *sb) 412static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
413{ 413{
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b5f9160e93e9..19ebc5aad391 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3228 u32 num_clusters, unsigned int e_flags) 3228 u32 num_clusters, unsigned int e_flags)
3229{ 3229{
3230 int ret, delete, index, credits = 0; 3230 int ret, delete, index, credits = 0;
3231 u32 new_bit, new_len; 3231 u32 new_bit, new_len, orig_num_clusters;
3232 unsigned int set_len; 3232 unsigned int set_len;
3233 struct ocfs2_super *osb = OCFS2_SB(sb); 3233 struct ocfs2_super *osb = OCFS2_SB(sb);
3234 handle_t *handle; 3234 handle_t *handle;
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3261 goto out; 3261 goto out;
3262 } 3262 }
3263 3263
3264 orig_num_clusters = num_clusters;
3265
3264 while (num_clusters) { 3266 while (num_clusters) {
3265 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, 3267 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
3266 p_cluster, num_clusters, 3268 p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3348 * in write-back mode. 3350 * in write-back mode.
3349 */ 3351 */
3350 if (context->get_clusters == ocfs2_di_get_clusters) { 3352 if (context->get_clusters == ocfs2_di_get_clusters) {
3351 ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); 3353 ret = ocfs2_cow_sync_writeback(sb, context, cpos,
3354 orig_num_clusters);
3352 if (ret) 3355 if (ret)
3353 mlog_errno(ret); 3356 mlog_errno(ret);
3354 } 3357 }
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 38f986d2447e..36c423fb0635 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb,
1316 struct mount_options *mopt, 1316 struct mount_options *mopt,
1317 int is_remount) 1317 int is_remount)
1318{ 1318{
1319 int status; 1319 int status, user_stack = 0;
1320 char *p; 1320 char *p;
1321 u32 tmp; 1321 u32 tmp;
1322 1322
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb,
1459 memcpy(mopt->cluster_stack, args[0].from, 1459 memcpy(mopt->cluster_stack, args[0].from,
1460 OCFS2_STACK_LABEL_LEN); 1460 OCFS2_STACK_LABEL_LEN);
1461 mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; 1461 mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
1462 /*
1463 * Open code the memcmp here as we don't have
1464 * an osb to pass to
1465 * ocfs2_userspace_stack().
1466 */
1467 if (memcmp(mopt->cluster_stack,
1468 OCFS2_CLASSIC_CLUSTER_STACK,
1469 OCFS2_STACK_LABEL_LEN))
1470 user_stack = 1;
1462 break; 1471 break;
1463 case Opt_inode64: 1472 case Opt_inode64:
1464 mopt->mount_opt |= OCFS2_MOUNT_INODE64; 1473 mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb,
1514 } 1523 }
1515 } 1524 }
1516 1525
1517 /* Ensure only one heartbeat mode */ 1526 if (user_stack == 0) {
1518 tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | 1527 /* Ensure only one heartbeat mode */
1519 OCFS2_MOUNT_HB_NONE); 1528 tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
1520 if (hweight32(tmp) != 1) { 1529 OCFS2_MOUNT_HB_GLOBAL |
1521 mlog(ML_ERROR, "Invalid heartbeat mount options\n"); 1530 OCFS2_MOUNT_HB_NONE);
1522 status = 0; 1531 if (hweight32(tmp) != 1) {
1523 goto bail; 1532 mlog(ML_ERROR, "Invalid heartbeat mount options\n");
1533 status = 0;
1534 goto bail;
1535 }
1524 } 1536 }
1525 1537
1526 status = 1; 1538 status = 1;
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 789c625c7aa5..b10e3540d5b7 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
251 } 251 }
252 252
253 vm->vblk_size = get_unaligned_be32(data + 0x08); 253 vm->vblk_size = get_unaligned_be32(data + 0x08);
254 if (vm->vblk_size == 0) {
255 ldm_error ("Illegal VBLK size");
256 return false;
257 }
258
254 vm->vblk_offset = get_unaligned_be32(data + 0x0C); 259 vm->vblk_offset = get_unaligned_be32(data + 0x0C);
255 vm->last_vblk_seq = get_unaligned_be32(data + 0x04); 260 vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
256 261
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index d9396a4fc7ff..927cbd115e53 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void)
233 return; 233 return;
234 root = of_find_node_by_path("/"); 234 root = of_find_node_by_path("/");
235 if (root == NULL) { 235 if (root == NULL) {
236 printk(KERN_ERR "/proc/device-tree: can't find root\n"); 236 pr_debug("/proc/device-tree: can't find root\n");
237 return; 237 return;
238 } 238 }
239 proc_device_tree_add_node(root, proc_device_tree); 239 proc_device_tree_add_node(root, proc_device_tree);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index ba5f51ec3458..68fdf45cc6c9 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, 771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
772 dentry, inode, &security); 772 dentry, inode, &security);
773 if (retval) { 773 if (retval) {
774 dir->i_nlink--; 774 DEC_DIR_INODE_NLINK(dir)
775 goto out_failed; 775 goto out_failed;
776 } 776 }
777 777
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index b427b1208c26..e474fbcf8bde 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
245 new_de = sysv_find_entry(new_dentry, &new_page); 245 new_de = sysv_find_entry(new_dentry, &new_page);
246 if (!new_de) 246 if (!new_de)
247 goto out_dir; 247 goto out_dir;
248 inode_inc_link_count(old_inode);
249 sysv_set_link(new_de, new_page, old_inode); 248 sysv_set_link(new_de, new_page, old_inode);
250 new_inode->i_ctime = CURRENT_TIME_SEC; 249 new_inode->i_ctime = CURRENT_TIME_SEC;
251 if (dir_de) 250 if (dir_de)
@@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
257 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 256 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
258 goto out_dir; 257 goto out_dir;
259 } 258 }
260 inode_inc_link_count(old_inode);
261 err = sysv_add_link(new_dentry, old_inode); 259 err = sysv_add_link(new_dentry, old_inode);
262 if (err) { 260 if (err)
263 inode_dec_link_count(old_inode);
264 goto out_dir; 261 goto out_dir;
265 }
266 if (dir_de) 262 if (dir_de)
267 inode_inc_link_count(new_dir); 263 inode_inc_link_count(new_dir);
268 } 264 }
269 265
270 sysv_delete_entry(old_de, old_page); 266 sysv_delete_entry(old_de, old_page);
271 inode_dec_link_count(old_inode); 267 mark_inode_dirty(old_inode);
272 268
273 if (dir_de) { 269 if (dir_de) {
274 sysv_set_link(dir_de, dir_page, new_dir); 270 sysv_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 2be0f9eb86d2..b7c338d5e9df 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -32,6 +32,8 @@
32#include <linux/crc-itu-t.h> 32#include <linux/crc-itu-t.h>
33#include <linux/exportfs.h> 33#include <linux/exportfs.h>
34 34
35enum { UDF_MAX_LINKS = 0xffff };
36
35static inline int udf_match(int len1, const unsigned char *name1, int len2, 37static inline int udf_match(int len1, const unsigned char *name1, int len2,
36 const unsigned char *name2) 38 const unsigned char *name2)
37{ 39{
@@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
650 struct udf_inode_info *iinfo; 652 struct udf_inode_info *iinfo;
651 653
652 err = -EMLINK; 654 err = -EMLINK;
653 if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) 655 if (dir->i_nlink >= UDF_MAX_LINKS)
654 goto out; 656 goto out;
655 657
656 err = -EIO; 658 err = -EIO;
@@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1034 struct fileIdentDesc cfi, *fi; 1036 struct fileIdentDesc cfi, *fi;
1035 int err; 1037 int err;
1036 1038
1037 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1039 if (inode->i_nlink >= UDF_MAX_LINKS)
1038 return -EMLINK; 1040 return -EMLINK;
1039 }
1040 1041
1041 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); 1042 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
1042 if (!fi) { 1043 if (!fi) {
@@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1131 goto end_rename; 1132 goto end_rename;
1132 1133
1133 retval = -EMLINK; 1134 retval = -EMLINK;
1134 if (!new_inode && 1135 if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS)
1135 new_dir->i_nlink >=
1136 (256 << sizeof(new_dir->i_nlink)) - 1)
1137 goto end_rename; 1136 goto end_rename;
1138 } 1137 }
1139 if (!nfi) { 1138 if (!nfi) {
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 12f39b9e4437..d6f681535eb8 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); 306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
307 if (!new_de) 307 if (!new_de)
308 goto out_dir; 308 goto out_dir;
309 inode_inc_link_count(old_inode);
310 ufs_set_link(new_dir, new_de, new_page, old_inode); 309 ufs_set_link(new_dir, new_de, new_page, old_inode);
311 new_inode->i_ctime = CURRENT_TIME_SEC; 310 new_inode->i_ctime = CURRENT_TIME_SEC;
312 if (dir_de) 311 if (dir_de)
@@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
318 if (new_dir->i_nlink >= UFS_LINK_MAX) 317 if (new_dir->i_nlink >= UFS_LINK_MAX)
319 goto out_dir; 318 goto out_dir;
320 } 319 }
321 inode_inc_link_count(old_inode);
322 err = ufs_add_link(new_dentry, old_inode); 320 err = ufs_add_link(new_dentry, old_inode);
323 if (err) { 321 if (err)
324 inode_dec_link_count(old_inode);
325 goto out_dir; 322 goto out_dir;
326 }
327 if (dir_de) 323 if (dir_de)
328 inode_inc_link_count(new_dir); 324 inode_inc_link_count(new_dir);
329 } 325 }
@@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
331 /* 327 /*
332 * Like most other Unix systems, set the ctime for inodes on a 328 * Like most other Unix systems, set the ctime for inodes on a
333 * rename. 329 * rename.
334 * inode_dec_link_count() will mark the inode dirty.
335 */ 330 */
336 old_inode->i_ctime = CURRENT_TIME_SEC; 331 old_inode->i_ctime = CURRENT_TIME_SEC;
337 332
338 ufs_delete_entry(old_dir, old_de, old_page); 333 ufs_delete_entry(old_dir, old_de, old_page);
339 inode_dec_link_count(old_inode); 334 mark_inode_dirty(old_inode);
340 335
341 if (dir_de) { 336 if (dir_de) {
342 ufs_set_link(old_inode, dir_de, dir_page, new_dir); 337 ufs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
index 05201ae719e5..d61611c88012 100644
--- a/fs/xfs/linux-2.6/xfs_discard.c
+++ b/fs/xfs/linux-2.6/xfs_discard.c
@@ -152,6 +152,8 @@ xfs_ioc_trim(
152 152
153 if (!capable(CAP_SYS_ADMIN)) 153 if (!capable(CAP_SYS_ADMIN))
154 return -XFS_ERROR(EPERM); 154 return -XFS_ERROR(EPERM);
155 if (!blk_queue_discard(q))
156 return -XFS_ERROR(EOPNOTSUPP);
155 if (copy_from_user(&range, urange, sizeof(range))) 157 if (copy_from_user(&range, urange, sizeof(range)))
156 return -XFS_ERROR(EFAULT); 158 return -XFS_ERROR(EFAULT);
157 159
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index f5e2a19e0f8e..0ca0e3c024d7 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1(
695 xfs_mount_t *mp, 695 xfs_mount_t *mp,
696 void __user *arg) 696 void __user *arg)
697{ 697{
698 xfs_fsop_geom_v1_t fsgeo; 698 xfs_fsop_geom_t fsgeo;
699 int error; 699 int error;
700 700
701 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); 701 error = xfs_fs_geometry(mp, &fsgeo, 3);
702 if (error) 702 if (error)
703 return -error; 703 return -error;
704 704
705 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) 705 /*
706 * Caller should have passed an argument of type
707 * xfs_fsop_geom_v1_t. This is a proper subset of the
708 * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
709 */
710 if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
706 return -XFS_ERROR(EFAULT); 711 return -XFS_ERROR(EFAULT);
707 return 0; 712 return 0;
708} 713}
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index cec89dd5d7d2..85668efb3e3e 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -53,6 +53,9 @@ xfs_fs_geometry(
53 xfs_fsop_geom_t *geo, 53 xfs_fsop_geom_t *geo,
54 int new_version) 54 int new_version)
55{ 55{
56
57 memset(geo, 0, sizeof(*geo));
58
56 geo->blocksize = mp->m_sb.sb_blocksize; 59 geo->blocksize = mp->m_sb.sb_blocksize;
57 geo->rtextsize = mp->m_sb.sb_rextsize; 60 geo->rtextsize = mp->m_sb.sb_rextsize;
58 geo->agblocks = mp->m_sb.sb_agblocks; 61 geo->agblocks = mp->m_sb.sb_agblocks;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 31b6188df221..b4bfe338ea0e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,6 +4,8 @@
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#ifdef CONFIG_MMU 5#ifdef CONFIG_MMU
6 6
7#include <linux/mm_types.h>
8
7#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 9#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
8extern int ptep_set_access_flags(struct vm_area_struct *vma, 10extern int ptep_set_access_flags(struct vm_area_struct *vma,
9 unsigned long address, pte_t *ptep, 11 unsigned long address, pte_t *ptep,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fe29aadb129d..348843b80150 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1101,7 +1101,7 @@ struct drm_device {
1101 struct platform_device *platformdev; /**< Platform device struture */ 1101 struct platform_device *platformdev; /**< Platform device struture */
1102 1102
1103 struct drm_sg_mem *sg; /**< Scatter gather memory */ 1103 struct drm_sg_mem *sg; /**< Scatter gather memory */
1104 int num_crtcs; /**< Number of CRTCs on this device */ 1104 unsigned int num_crtcs; /**< Number of CRTCs on this device */
1105 void *dev_private; /**< device private data */ 1105 void *dev_private; /**< device private data */
1106 void *mm_private; 1106 void *mm_private;
1107 struct address_space *dev_mapping; 1107 struct address_space *dev_mapping;
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 5cb86c307f5d..fc4875433817 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -99,7 +99,6 @@ struct rxrpc_key_token {
99 * structure of raw payloads passed to add_key() or instantiate key 99 * structure of raw payloads passed to add_key() or instantiate key
100 */ 100 */
101struct rxrpc_key_data_v1 { 101struct rxrpc_key_data_v1 {
102 u32 kif_version; /* 1 */
103 u16 security_index; 102 u16 security_index;
104 u16 ticket_length; 103 u16 ticket_length;
105 u32 expiry; /* time_t */ 104 u32 expiry; /* time_t */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d18ff34670a..d5063e1b5555 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
699extern void blk_stop_queue(struct request_queue *q); 699extern void blk_stop_queue(struct request_queue *q);
700extern void blk_sync_queue(struct request_queue *q); 700extern void blk_sync_queue(struct request_queue *q);
701extern void __blk_stop_queue(struct request_queue *q); 701extern void __blk_stop_queue(struct request_queue *q);
702extern void __blk_run_queue(struct request_queue *); 702extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
703extern void blk_run_queue(struct request_queue *); 703extern void blk_run_queue(struct request_queue *);
704extern int blk_rq_map_user(struct request_queue *, struct request *, 704extern int blk_rq_map_user(struct request_queue *, struct request *,
705 struct rq_map_data *, void __user *, unsigned long, 705 struct rq_map_data *, void __user *, unsigned long,
@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
1088 1088
1089struct work_struct; 1089struct work_struct;
1090int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1090int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1091int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
1092 1091
1093#ifdef CONFIG_BLK_CGROUP 1092#ifdef CONFIG_BLK_CGROUP
1094/* 1093/*
@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
1136extern int blk_throtl_init(struct request_queue *q); 1135extern int blk_throtl_init(struct request_queue *q);
1137extern void blk_throtl_exit(struct request_queue *q); 1136extern void blk_throtl_exit(struct request_queue *q);
1138extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); 1137extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
1139extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
1140extern void throtl_shutdown_timer_wq(struct request_queue *q); 1138extern void throtl_shutdown_timer_wq(struct request_queue *q);
1141#else /* CONFIG_BLK_DEV_THROTTLING */ 1139#else /* CONFIG_BLK_DEV_THROTTLING */
1142static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) 1140static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
1146 1144
1147static inline int blk_throtl_init(struct request_queue *q) { return 0; } 1145static inline int blk_throtl_init(struct request_queue *q) { return 0; }
1148static inline int blk_throtl_exit(struct request_queue *q) { return 0; } 1146static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
1149static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
1150static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} 1147static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
1151#endif /* CONFIG_BLK_DEV_THROTTLING */ 1148#endif /* CONFIG_BLK_DEV_THROTTLING */
1152 1149
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3395cf7130f5..b22fb0d3db0f 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
245 245
246extern void blk_dump_cmd(char *buf, struct request *rq); 246extern void blk_dump_cmd(char *buf, struct request *rq);
247extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); 247extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
248extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
249 248
250#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 249#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
251 250
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 4c5b26e0cc48..a3680a16718f 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -87,6 +87,45 @@ struct ieee_pfc {
87 __u64 indications[IEEE_8021QAZ_MAX_TCS]; 87 __u64 indications[IEEE_8021QAZ_MAX_TCS];
88}; 88};
89 89
90/* CEE DCBX std supported values */
91#define CEE_DCBX_MAX_PGS 8
92#define CEE_DCBX_MAX_PRIO 8
93
94/**
95 * struct cee_pg - CEE Prioity-Group managed object
96 *
97 * @willing: willing bit in the PG tlv
98 * @error: error bit in the PG tlv
99 * @pg_en: enable bit of the PG feature
100 * @tcs_supported: number of traffic classes supported
101 * @pg_bw: bandwidth percentage for each priority group
102 * @prio_pg: priority to PG mapping indexed by priority
103 */
104struct cee_pg {
105 __u8 willing;
106 __u8 error;
107 __u8 pg_en;
108 __u8 tcs_supported;
109 __u8 pg_bw[CEE_DCBX_MAX_PGS];
110 __u8 prio_pg[CEE_DCBX_MAX_PGS];
111};
112
113/**
114 * struct cee_pfc - CEE PFC managed object
115 *
116 * @willing: willing bit in the PFC tlv
117 * @error: error bit in the PFC tlv
118 * @pfc_en: bitmap indicating pfc enabled traffic classes
119 * @tcs_supported: number of traffic classes supported
120 */
121struct cee_pfc {
122 __u8 willing;
123 __u8 error;
124 __u8 pfc_en;
125 __u8 tcs_supported;
126};
127
128
90/* This structure contains the IEEE 802.1Qaz APP managed object. This 129/* This structure contains the IEEE 802.1Qaz APP managed object. This
91 * object is also used for the CEE std as well. There is no difference 130 * object is also used for the CEE std as well. There is no difference
92 * between the objects. 131 * between the objects.
@@ -110,6 +149,20 @@ struct dcb_app {
110 __u16 protocol; 149 __u16 protocol;
111}; 150};
112 151
152/**
153 * struct dcb_peer_app_info - APP feature information sent by the peer
154 *
155 * @willing: willing bit in the peer APP tlv
156 * @error: error bit in the peer APP tlv
157 *
158 * In addition to this information the full peer APP tlv also contains
159 * a table of 'app_count' APP objects defined above.
160 */
161struct dcb_peer_app_info {
162 __u8 willing;
163 __u8 error;
164};
165
113struct dcbmsg { 166struct dcbmsg {
114 __u8 dcb_family; 167 __u8 dcb_family;
115 __u8 cmd; 168 __u8 cmd;
@@ -144,6 +197,7 @@ struct dcbmsg {
144 * @DCB_CMD_SDCBX: set DCBX engine configuration 197 * @DCB_CMD_SDCBX: set DCBX engine configuration
145 * @DCB_CMD_GFEATCFG: get DCBX features flags 198 * @DCB_CMD_GFEATCFG: get DCBX features flags
146 * @DCB_CMD_SFEATCFG: set DCBX features negotiation flags 199 * @DCB_CMD_SFEATCFG: set DCBX features negotiation flags
200 * @DCB_CMD_CEE_GET: get CEE aggregated configuration
147 */ 201 */
148enum dcbnl_commands { 202enum dcbnl_commands {
149 DCB_CMD_UNDEFINED, 203 DCB_CMD_UNDEFINED,
@@ -186,6 +240,8 @@ enum dcbnl_commands {
186 DCB_CMD_GFEATCFG, 240 DCB_CMD_GFEATCFG,
187 DCB_CMD_SFEATCFG, 241 DCB_CMD_SFEATCFG,
188 242
243 DCB_CMD_CEE_GET,
244
189 __DCB_CMD_ENUM_MAX, 245 __DCB_CMD_ENUM_MAX,
190 DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1, 246 DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
191}; 247};
@@ -208,6 +264,7 @@ enum dcbnl_commands {
208 * @DCB_ATTR_IEEE: IEEE 802.1Qaz supported attributes (NLA_NESTED) 264 * @DCB_ATTR_IEEE: IEEE 802.1Qaz supported attributes (NLA_NESTED)
209 * @DCB_ATTR_DCBX: DCBX engine configuration in the device (NLA_U8) 265 * @DCB_ATTR_DCBX: DCBX engine configuration in the device (NLA_U8)
210 * @DCB_ATTR_FEATCFG: DCBX features flags (NLA_NESTED) 266 * @DCB_ATTR_FEATCFG: DCBX features flags (NLA_NESTED)
267 * @DCB_ATTR_CEE: CEE std supported attributes (NLA_NESTED)
211 */ 268 */
212enum dcbnl_attrs { 269enum dcbnl_attrs {
213 DCB_ATTR_UNDEFINED, 270 DCB_ATTR_UNDEFINED,
@@ -231,15 +288,32 @@ enum dcbnl_attrs {
231 DCB_ATTR_DCBX, 288 DCB_ATTR_DCBX,
232 DCB_ATTR_FEATCFG, 289 DCB_ATTR_FEATCFG,
233 290
291 /* CEE nested attributes */
292 DCB_ATTR_CEE,
293
234 __DCB_ATTR_ENUM_MAX, 294 __DCB_ATTR_ENUM_MAX,
235 DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1, 295 DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
236}; 296};
237 297
298/**
299 * enum ieee_attrs - IEEE 802.1Qaz get/set attributes
300 *
301 * @DCB_ATTR_IEEE_UNSPEC: unspecified
302 * @DCB_ATTR_IEEE_ETS: negotiated ETS configuration
303 * @DCB_ATTR_IEEE_PFC: negotiated PFC configuration
304 * @DCB_ATTR_IEEE_APP_TABLE: negotiated APP configuration
305 * @DCB_ATTR_IEEE_PEER_ETS: peer ETS configuration - get only
306 * @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only
307 * @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only
308 */
238enum ieee_attrs { 309enum ieee_attrs {
239 DCB_ATTR_IEEE_UNSPEC, 310 DCB_ATTR_IEEE_UNSPEC,
240 DCB_ATTR_IEEE_ETS, 311 DCB_ATTR_IEEE_ETS,
241 DCB_ATTR_IEEE_PFC, 312 DCB_ATTR_IEEE_PFC,
242 DCB_ATTR_IEEE_APP_TABLE, 313 DCB_ATTR_IEEE_APP_TABLE,
314 DCB_ATTR_IEEE_PEER_ETS,
315 DCB_ATTR_IEEE_PEER_PFC,
316 DCB_ATTR_IEEE_PEER_APP,
243 __DCB_ATTR_IEEE_MAX 317 __DCB_ATTR_IEEE_MAX
244}; 318};
245#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1) 319#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
@@ -252,6 +326,31 @@ enum ieee_attrs_app {
252#define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1) 326#define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1)
253 327
254/** 328/**
329 * enum cee_attrs - CEE DCBX get attributes
330 *
331 * @DCB_ATTR_CEE_UNSPEC: unspecified
332 * @DCB_ATTR_CEE_PEER_PG: peer PG configuration - get only
333 * @DCB_ATTR_CEE_PEER_PFC: peer PFC configuration - get only
334 * @DCB_ATTR_CEE_PEER_APP: peer APP tlv - get only
335 */
336enum cee_attrs {
337 DCB_ATTR_CEE_UNSPEC,
338 DCB_ATTR_CEE_PEER_PG,
339 DCB_ATTR_CEE_PEER_PFC,
340 DCB_ATTR_CEE_PEER_APP_TABLE,
341 __DCB_ATTR_CEE_MAX
342};
343#define DCB_ATTR_CEE_MAX (__DCB_ATTR_CEE_MAX - 1)
344
345enum peer_app_attr {
346 DCB_ATTR_CEE_PEER_APP_UNSPEC,
347 DCB_ATTR_CEE_PEER_APP_INFO,
348 DCB_ATTR_CEE_PEER_APP,
349 __DCB_ATTR_CEE_PEER_APP_MAX
350};
351#define DCB_ATTR_CEE_PEER_APP_MAX (__DCB_ATTR_CEE_PEER_APP_MAX - 1)
352
353/**
255 * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs 354 * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
256 * 355 *
257 * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors 356 * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bd3215940c37..e38b50a4b9d2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -649,6 +649,7 @@ struct address_space {
649 spinlock_t private_lock; /* for use by the address_space */ 649 spinlock_t private_lock; /* for use by the address_space */
650 struct list_head private_list; /* ditto */ 650 struct list_head private_list; /* ditto */
651 struct address_space *assoc_mapping; /* ditto */ 651 struct address_space *assoc_mapping; /* ditto */
652 struct mutex unmap_mutex; /* to protect unmapping */
652} __attribute__((aligned(sizeof(long)))); 653} __attribute__((aligned(sizeof(long))));
653 /* 654 /*
654 * On most architectures that alignment is already the case; but 655 * On most architectures that alignment is already the case; but
@@ -2139,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk,
2139 struct block_device *bdev); 2140 struct block_device *bdev);
2140extern int revalidate_disk(struct gendisk *); 2141extern int revalidate_disk(struct gendisk *);
2141extern int check_disk_change(struct block_device *); 2142extern int check_disk_change(struct block_device *);
2142extern int __invalidate_device(struct block_device *); 2143extern int __invalidate_device(struct block_device *, bool);
2143extern int invalidate_partition(struct gendisk *, int); 2144extern int invalidate_partition(struct gendisk *, int);
2144#endif 2145#endif
2145unsigned long invalidate_mapping_pages(struct address_space *mapping, 2146unsigned long invalidate_mapping_pages(struct address_space *mapping,
@@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
2225 2226
2226extern int inode_init_always(struct super_block *, struct inode *); 2227extern int inode_init_always(struct super_block *, struct inode *);
2227extern void inode_init_once(struct inode *); 2228extern void inode_init_once(struct inode *);
2229extern void address_space_init_once(struct address_space *mapping);
2228extern void ihold(struct inode * inode); 2230extern void ihold(struct inode * inode);
2229extern void iput(struct inode *); 2231extern void iput(struct inode *);
2230extern struct inode * igrab(struct inode *); 2232extern struct inode * igrab(struct inode *);
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 3fd36845ca45..ef4f0b6083a3 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -71,6 +71,7 @@ struct wm8994 {
71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; 71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
72 72
73 /* Used over suspend/resume */ 73 /* Used over suspend/resume */
74 bool suspended;
74 u16 ldo_regs[WM8994_NUM_LDO_REGS]; 75 u16 ldo_regs[WM8994_NUM_LDO_REGS];
75 u16 gpio_regs[WM8994_NUM_GPIO_REGS]; 76 u16 gpio_regs[WM8994_NUM_GPIO_REGS];
76 77
diff --git a/include/linux/module.h b/include/linux/module.h
index 9bdf27c7615b..5de42043dff0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -62,7 +62,7 @@ struct module_version_attribute {
62 struct module_attribute mattr; 62 struct module_attribute mattr;
63 const char *module_name; 63 const char *module_name;
64 const char *version; 64 const char *version;
65}; 65} __attribute__ ((__aligned__(sizeof(void *))));
66 66
67struct module_kobject 67struct module_kobject
68{ 68{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ffe56c16df8a..6bd5d460b7c1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -75,9 +75,6 @@ struct wireless_dev;
75#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 75#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
76#define NET_RX_DROP 1 /* packet dropped */ 76#define NET_RX_DROP 1 /* packet dropped */
77 77
78/* Initial net device group. All devices belong to group 0 by default. */
79#define INIT_NETDEV_GROUP 0
80
81/* 78/*
82 * Transmit return codes: transmit return codes originate from three different 79 * Transmit return codes: transmit return codes originate from three different
83 * namespaces: 80 * namespaces:
@@ -141,6 +138,9 @@ static inline bool dev_xmit_complete(int rc)
141 138
142#define MAX_ADDR_LEN 32 /* Largest hardware address length */ 139#define MAX_ADDR_LEN 32 /* Largest hardware address length */
143 140
141/* Initial net device group. All devices belong to group 0 by default. */
142#define INIT_NETDEV_GROUP 0
143
144#ifdef __KERNEL__ 144#ifdef __KERNEL__
145/* 145/*
146 * Compute the worst case header length according to the protocols 146 * Compute the worst case header length according to the protocols
@@ -871,6 +871,10 @@ struct net_device_ops {
871 unsigned int sgc); 871 unsigned int sgc);
872 int (*ndo_fcoe_ddp_done)(struct net_device *dev, 872 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
873 u16 xid); 873 u16 xid);
874 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
875 u16 xid,
876 struct scatterlist *sgl,
877 unsigned int sgc);
874#define NETDEV_FCOE_WWNN 0 878#define NETDEV_FCOE_WWNN 0
875#define NETDEV_FCOE_WWPN 1 879#define NETDEV_FCOE_WWPN 1
876 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 880 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
@@ -1765,8 +1769,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
1765static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 1769static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1766{ 1770{
1767 if (WARN_ON(!dev_queue)) { 1771 if (WARN_ON(!dev_queue)) {
1768 printk(KERN_INFO "netif_stop_queue() cannot be called before " 1772 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
1769 "register_netdev()");
1770 return; 1773 return;
1771 } 1774 }
1772 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); 1775 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index e2b9e63afa68..4c4ac3f3ce5a 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -160,10 +160,6 @@ struct netlink_skb_parms {
160 struct ucred creds; /* Skb credentials */ 160 struct ucred creds; /* Skb credentials */
161 __u32 pid; 161 __u32 pid;
162 __u32 dst_group; 162 __u32 dst_group;
163 kernel_cap_t eff_cap;
164 __u32 loginuid; /* Login (audit) uid */
165 __u32 sessionid; /* Session id (audit) */
166 __u32 sid; /* SELinux security id */
167}; 163};
168 164
169#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) 165#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 821ffb954f14..30022189104d 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -1243,6 +1243,8 @@ enum nl80211_rate_info {
1243 * @NL80211_STA_INFO_LLID: the station's mesh LLID 1243 * @NL80211_STA_INFO_LLID: the station's mesh LLID
1244 * @NL80211_STA_INFO_PLID: the station's mesh PLID 1244 * @NL80211_STA_INFO_PLID: the station's mesh PLID
1245 * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station 1245 * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
1246 * @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested
1247 * attribute, like NL80211_STA_INFO_TX_BITRATE.
1246 * @__NL80211_STA_INFO_AFTER_LAST: internal 1248 * @__NL80211_STA_INFO_AFTER_LAST: internal
1247 * @NL80211_STA_INFO_MAX: highest possible station info attribute 1249 * @NL80211_STA_INFO_MAX: highest possible station info attribute
1248 */ 1250 */
@@ -1261,6 +1263,7 @@ enum nl80211_sta_info {
1261 NL80211_STA_INFO_TX_RETRIES, 1263 NL80211_STA_INFO_TX_RETRIES,
1262 NL80211_STA_INFO_TX_FAILED, 1264 NL80211_STA_INFO_TX_FAILED,
1263 NL80211_STA_INFO_SIGNAL_AVG, 1265 NL80211_STA_INFO_SIGNAL_AVG,
1266 NL80211_STA_INFO_RX_BITRATE,
1264 1267
1265 /* keep last */ 1268 /* keep last */
1266 __NL80211_STA_INFO_AFTER_LAST, 1269 __NL80211_STA_INFO_AFTER_LAST,
diff --git a/include/linux/pm.h b/include/linux/pm.h
index dd9c7ab38270..21415cc91cbb 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -431,6 +431,8 @@ struct dev_pm_info {
431 struct list_head entry; 431 struct list_head entry;
432 struct completion completion; 432 struct completion completion;
433 struct wakeup_source *wakeup; 433 struct wakeup_source *wakeup;
434#else
435 unsigned int should_wakeup:1;
434#endif 436#endif
435#ifdef CONFIG_PM_RUNTIME 437#ifdef CONFIG_PM_RUNTIME
436 struct timer_list suspend_timer; 438 struct timer_list suspend_timer;
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 9cff00dd6b63..03a67db03d01 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev)
109 return dev->power.can_wakeup; 109 return dev->power.can_wakeup;
110} 110}
111 111
112static inline bool device_may_wakeup(struct device *dev)
113{
114 return false;
115}
116
117static inline struct wakeup_source *wakeup_source_create(const char *name) 112static inline struct wakeup_source *wakeup_source_create(const char *name)
118{ 113{
119 return NULL; 114 return NULL;
@@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
134 129
135static inline int device_wakeup_enable(struct device *dev) 130static inline int device_wakeup_enable(struct device *dev)
136{ 131{
137 return -EINVAL; 132 dev->power.should_wakeup = true;
133 return 0;
138} 134}
139 135
140static inline int device_wakeup_disable(struct device *dev) 136static inline int device_wakeup_disable(struct device *dev)
141{ 137{
138 dev->power.should_wakeup = false;
142 return 0; 139 return 0;
143} 140}
144 141
145static inline int device_init_wakeup(struct device *dev, bool val) 142static inline int device_set_wakeup_enable(struct device *dev, bool enable)
146{ 143{
147 dev->power.can_wakeup = val; 144 dev->power.should_wakeup = enable;
148 return val ? -EINVAL : 0; 145 return 0;
149} 146}
150 147
148static inline int device_init_wakeup(struct device *dev, bool val)
149{
150 device_set_wakeup_capable(dev, val);
151 device_set_wakeup_enable(dev, val);
152 return 0;
153}
151 154
152static inline int device_set_wakeup_enable(struct device *dev, bool enable) 155static inline bool device_may_wakeup(struct device *dev)
153{ 156{
154 return -EINVAL; 157 return dev->power.can_wakeup && dev->power.should_wakeup;
155} 158}
156 159
157static inline void __pm_stay_awake(struct wakeup_source *ws) {} 160static inline void __pm_stay_awake(struct wakeup_source *ws) {}
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index d63dcbaea169..9026b30238f3 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -14,10 +14,12 @@
14#define LINUX_RIO_REGS_H 14#define LINUX_RIO_REGS_H
15 15
16/* 16/*
17 * In RapidIO, each device has a 2MB configuration space that is 17 * In RapidIO, each device has a 16MB configuration space that is
18 * accessed via maintenance transactions. Portions of configuration 18 * accessed via maintenance transactions. Portions of configuration
19 * space are standardized and/or reserved. 19 * space are standardized and/or reserved.
20 */ 20 */
21#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO mainenance space */
22
21#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ 23#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */
22#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ 24#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */
23#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ 25#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8651556dbd52..d3ec89fb4122 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *);
172struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, 172struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
173 const struct thermal_cooling_device_ops *); 173 const struct thermal_cooling_device_ops *);
174void thermal_cooling_device_unregister(struct thermal_cooling_device *); 174void thermal_cooling_device_unregister(struct thermal_cooling_device *);
175
176#ifdef CONFIG_NET
175extern int generate_netlink_event(u32 orig, enum events event); 177extern int generate_netlink_event(u32 orig, enum events event);
178#else
179static inline int generate_netlink_event(u32 orig, enum events event)
180{
181 return 0;
182}
183#endif
176 184
177#endif /* __THERMAL_H__ */ 185#endif /* __THERMAL_H__ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 679a0494b5f2..1ac5786da14b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -413,7 +413,7 @@ struct station_parameters {
413 * @STATION_INFO_PLID: @plid filled 413 * @STATION_INFO_PLID: @plid filled
414 * @STATION_INFO_PLINK_STATE: @plink_state filled 414 * @STATION_INFO_PLINK_STATE: @plink_state filled
415 * @STATION_INFO_SIGNAL: @signal filled 415 * @STATION_INFO_SIGNAL: @signal filled
416 * @STATION_INFO_TX_BITRATE: @tx_bitrate fields are filled 416 * @STATION_INFO_TX_BITRATE: @txrate fields are filled
417 * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) 417 * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
418 * @STATION_INFO_RX_PACKETS: @rx_packets filled 418 * @STATION_INFO_RX_PACKETS: @rx_packets filled
419 * @STATION_INFO_TX_PACKETS: @tx_packets filled 419 * @STATION_INFO_TX_PACKETS: @tx_packets filled
@@ -421,6 +421,7 @@ struct station_parameters {
421 * @STATION_INFO_TX_FAILED: @tx_failed filled 421 * @STATION_INFO_TX_FAILED: @tx_failed filled
422 * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled 422 * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
423 * @STATION_INFO_SIGNAL_AVG: @signal_avg filled 423 * @STATION_INFO_SIGNAL_AVG: @signal_avg filled
424 * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
424 */ 425 */
425enum station_info_flags { 426enum station_info_flags {
426 STATION_INFO_INACTIVE_TIME = 1<<0, 427 STATION_INFO_INACTIVE_TIME = 1<<0,
@@ -437,6 +438,7 @@ enum station_info_flags {
437 STATION_INFO_TX_FAILED = 1<<11, 438 STATION_INFO_TX_FAILED = 1<<11,
438 STATION_INFO_RX_DROP_MISC = 1<<12, 439 STATION_INFO_RX_DROP_MISC = 1<<12,
439 STATION_INFO_SIGNAL_AVG = 1<<13, 440 STATION_INFO_SIGNAL_AVG = 1<<13,
441 STATION_INFO_RX_BITRATE = 1<<14,
440}; 442};
441 443
442/** 444/**
@@ -506,6 +508,7 @@ struct station_info {
506 s8 signal; 508 s8 signal;
507 s8 signal_avg; 509 s8 signal_avg;
508 struct rate_info txrate; 510 struct rate_info txrate;
511 struct rate_info rxrate;
509 u32 rx_packets; 512 u32 rx_packets;
510 u32 tx_packets; 513 u32 tx_packets;
511 u32 tx_retries; 514 u32 tx_retries;
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index a8e7852b10ab..e5983c9053dc 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -43,6 +43,8 @@ struct dcbnl_rtnl_ops {
43 int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); 43 int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
44 int (*ieee_getapp) (struct net_device *, struct dcb_app *); 44 int (*ieee_getapp) (struct net_device *, struct dcb_app *);
45 int (*ieee_setapp) (struct net_device *, struct dcb_app *); 45 int (*ieee_setapp) (struct net_device *, struct dcb_app *);
46 int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *);
47 int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *);
46 48
47 /* CEE std */ 49 /* CEE std */
48 u8 (*getstate)(struct net_device *); 50 u8 (*getstate)(struct net_device *);
@@ -77,7 +79,14 @@ struct dcbnl_rtnl_ops {
77 u8 (*getdcbx)(struct net_device *); 79 u8 (*getdcbx)(struct net_device *);
78 u8 (*setdcbx)(struct net_device *, u8); 80 u8 (*setdcbx)(struct net_device *, u8);
79 81
82 /* peer apps */
83 int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *,
84 u16 *);
85 int (*peer_getapptable)(struct net_device *, struct dcb_app *);
80 86
87 /* CEE peer */
88 int (*cee_peer_getpg) (struct net_device *, struct cee_pg *);
89 int (*cee_peer_getpfc) (struct net_device *, struct cee_pfc *);
81}; 90};
82 91
83#endif /* __NET_DCBNL_H__ */ 92#endif /* __NET_DCBNL_H__ */
diff --git a/include/net/dst.h b/include/net/dst.h
index 4fedffd7c56f..2a46cbaef92d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -421,29 +421,22 @@ extern void dst_init(void);
421 421
422/* Flags for xfrm_lookup flags argument. */ 422/* Flags for xfrm_lookup flags argument. */
423enum { 423enum {
424 XFRM_LOOKUP_WAIT = 1 << 0, 424 XFRM_LOOKUP_ICMP = 1 << 0,
425 XFRM_LOOKUP_ICMP = 1 << 1,
426}; 425};
427 426
428struct flowi; 427struct flowi;
429#ifndef CONFIG_XFRM 428#ifndef CONFIG_XFRM
430static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p, 429static inline struct dst_entry *xfrm_lookup(struct net *net,
431 const struct flowi *fl, struct sock *sk, 430 struct dst_entry *dst_orig,
432 int flags) 431 const struct flowi *fl, struct sock *sk,
432 int flags)
433{ 433{
434 return 0; 434 return dst_orig;
435} 435}
436static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
437 const struct flowi *fl, struct sock *sk,
438 int flags)
439{
440 return 0;
441}
442#else 436#else
443extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p, 437extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
444 const struct flowi *fl, struct sock *sk, int flags); 438 const struct flowi *fl, struct sock *sk,
445extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, 439 int flags);
446 const struct flowi *fl, struct sock *sk, int flags);
447#endif 440#endif
448#endif 441#endif
449 442
diff --git a/include/net/flow.h b/include/net/flow.h
index f2080e65276d..fd0413873b8e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -50,6 +50,7 @@ struct flowi {
50 __u8 flags; 50 __u8 flags;
51#define FLOWI_FLAG_ANYSRC 0x01 51#define FLOWI_FLAG_ANYSRC 0x01
52#define FLOWI_FLAG_PRECOW_METRICS 0x02 52#define FLOWI_FLAG_PRECOW_METRICS 0x02
53#define FLOWI_FLAG_CAN_SLEEP 0x04
53 union { 54 union {
54 struct { 55 struct {
55 __be16 sport; 56 __be16 sport;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 6e6dfd757682..7a37369f8ea3 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -86,6 +86,19 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
86 return (struct inet_request_sock *)sk; 86 return (struct inet_request_sock *)sk;
87} 87}
88 88
89struct inet_cork {
90 unsigned int flags;
91 unsigned int fragsize;
92 struct ip_options *opt;
93 struct dst_entry *dst;
94 int length; /* Total length of all frames */
95 __be32 addr;
96 struct flowi fl;
97 struct page *page;
98 u32 off;
99 u8 tx_flags;
100};
101
89struct ip_mc_socklist; 102struct ip_mc_socklist;
90struct ipv6_pinfo; 103struct ipv6_pinfo;
91struct rtable; 104struct rtable;
@@ -143,15 +156,7 @@ struct inet_sock {
143 int mc_index; 156 int mc_index;
144 __be32 mc_addr; 157 __be32 mc_addr;
145 struct ip_mc_socklist __rcu *mc_list; 158 struct ip_mc_socklist __rcu *mc_list;
146 struct { 159 struct inet_cork cork;
147 unsigned int flags;
148 unsigned int fragsize;
149 struct ip_options *opt;
150 struct dst_entry *dst;
151 int length; /* Total length of all frames */
152 __be32 addr;
153 struct flowi fl;
154 } cork;
155}; 160};
156 161
157#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ 162#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
diff --git a/include/net/ip.h b/include/net/ip.h
index 67fac78a186b..a4f631108c54 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -116,8 +116,24 @@ extern int ip_append_data(struct sock *sk,
116extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb); 116extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
117extern ssize_t ip_append_page(struct sock *sk, struct page *page, 117extern ssize_t ip_append_page(struct sock *sk, struct page *page,
118 int offset, size_t size, int flags); 118 int offset, size_t size, int flags);
119extern struct sk_buff *__ip_make_skb(struct sock *sk,
120 struct sk_buff_head *queue,
121 struct inet_cork *cork);
122extern int ip_send_skb(struct sk_buff *skb);
119extern int ip_push_pending_frames(struct sock *sk); 123extern int ip_push_pending_frames(struct sock *sk);
120extern void ip_flush_pending_frames(struct sock *sk); 124extern void ip_flush_pending_frames(struct sock *sk);
125extern struct sk_buff *ip_make_skb(struct sock *sk,
126 int getfrag(void *from, char *to, int offset, int len,
127 int odd, struct sk_buff *skb),
128 void *from, int length, int transhdrlen,
129 struct ipcm_cookie *ipc,
130 struct rtable **rtp,
131 unsigned int flags);
132
133static inline struct sk_buff *ip_finish_skb(struct sock *sk)
134{
135 return __ip_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
136}
121 137
122/* datagram.c */ 138/* datagram.c */
123extern int ip4_datagram_connect(struct sock *sk, 139extern int ip4_datagram_connect(struct sock *sk,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 523a170b0ecb..3f6c943faedc 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -51,6 +51,7 @@ struct fib_nh {
51 struct fib_info *nh_parent; 51 struct fib_info *nh_parent;
52 unsigned nh_flags; 52 unsigned nh_flags;
53 unsigned char nh_scope; 53 unsigned char nh_scope;
54 unsigned char nh_cfg_scope;
54#ifdef CONFIG_IP_ROUTE_MULTIPATH 55#ifdef CONFIG_IP_ROUTE_MULTIPATH
55 int nh_weight; 56 int nh_weight;
56 int nh_power; 57 int nh_power;
@@ -60,6 +61,7 @@ struct fib_nh {
60#endif 61#endif
61 int nh_oif; 62 int nh_oif;
62 __be32 nh_gw; 63 __be32 nh_gw;
64 __be32 nh_saddr;
63}; 65};
64 66
65/* 67/*
@@ -139,11 +141,13 @@ struct fib_result_nl {
139 141
140#endif /* CONFIG_IP_ROUTE_MULTIPATH */ 142#endif /* CONFIG_IP_ROUTE_MULTIPATH */
141 143
142#define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __fib_res_prefsrc(&res)) 144#define FIB_RES_SADDR(res) (FIB_RES_NH(res).nh_saddr)
143#define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) 145#define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
144#define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev) 146#define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev)
145#define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif) 147#define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif)
146 148
149#define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : FIB_RES_SADDR(res))
150
147struct fib_table { 151struct fib_table {
148 struct hlist_node tb_hlist; 152 struct hlist_node tb_hlist;
149 u32 tb_id; 153 u32 tb_id;
@@ -224,8 +228,8 @@ extern void fib_select_default(struct fib_result *res);
224extern int ip_fib_check_default(__be32 gw, struct net_device *dev); 228extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
225extern int fib_sync_down_dev(struct net_device *dev, int force); 229extern int fib_sync_down_dev(struct net_device *dev, int force);
226extern int fib_sync_down_addr(struct net *net, __be32 local); 230extern int fib_sync_down_addr(struct net *net, __be32 local);
231extern void fib_update_nh_saddrs(struct net_device *dev);
227extern int fib_sync_up(struct net_device *dev); 232extern int fib_sync_up(struct net_device *dev);
228extern __be32 __fib_res_prefsrc(struct fib_result *res);
229extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res); 233extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
230 234
231/* Exported by fib_trie.c */ 235/* Exported by fib_trie.c */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 5d75feadf4f4..e74da41ebd1b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -494,7 +494,7 @@ struct ip_vs_conn_param {
494 * IP_VS structure allocated for each dynamically scheduled connection 494 * IP_VS structure allocated for each dynamically scheduled connection
495 */ 495 */
496struct ip_vs_conn { 496struct ip_vs_conn {
497 struct list_head c_list; /* hashed list heads */ 497 struct hlist_node c_list; /* hashed list heads */
498#ifdef CONFIG_NET_NS 498#ifdef CONFIG_NET_NS
499 struct net *net; /* Name space */ 499 struct net *net; /* Name space */
500#endif 500#endif
@@ -1019,6 +1019,8 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
1019extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, 1019extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
1020 struct ip_vs_proto_data *pd); 1020 struct ip_vs_proto_data *pd);
1021 1021
1022extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
1023
1022 1024
1023/* 1025/*
1024 * IPVS control data and functions (from ip_vs_ctl.c) 1026 * IPVS control data and functions (from ip_vs_ctl.c)
@@ -1241,6 +1243,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
1241/* CONFIG_IP_VS_NFCT */ 1243/* CONFIG_IP_VS_NFCT */
1242#endif 1244#endif
1243 1245
1246static inline unsigned int
1247ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1248{
1249 /*
1250 * We think the overhead of processing active connections is 256
1251 * times higher than that of inactive connections in average. (This
1252 * 256 times might not be accurate, we will change it later) We
1253 * use the following formula to estimate the overhead now:
1254 * dest->activeconns*256 + dest->inactconns
1255 */
1256 return (atomic_read(&dest->activeconns) << 8) +
1257 atomic_read(&dest->inactconns);
1258}
1259
1244#endif /* __KERNEL__ */ 1260#endif /* __KERNEL__ */
1245 1261
1246#endif /* _NET_IP_VS_H */ 1262#endif /* _NET_IP_VS_H */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4a3cd2cd2f5e..4635a5c80967 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -89,6 +89,18 @@
89#define IPV6_ADDR_SCOPE_GLOBAL 0x0e 89#define IPV6_ADDR_SCOPE_GLOBAL 0x0e
90 90
91/* 91/*
92 * Addr flags
93 */
94#ifdef __KERNEL__
95#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
96 ((a)->s6_addr[1] & 0x10)
97#define IPV6_ADDR_MC_FLAG_PREFIX(a) \
98 ((a)->s6_addr[1] & 0x20)
99#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
100 ((a)->s6_addr[1] & 0x40)
101#endif
102
103/*
92 * fragmentation header 104 * fragmentation header
93 */ 105 */
94 106
@@ -512,12 +524,16 @@ extern void ip6_flush_pending_frames(struct sock *sk);
512extern int ip6_dst_lookup(struct sock *sk, 524extern int ip6_dst_lookup(struct sock *sk,
513 struct dst_entry **dst, 525 struct dst_entry **dst,
514 struct flowi *fl); 526 struct flowi *fl);
515extern int ip6_dst_blackhole(struct sock *sk, 527extern struct dst_entry * ip6_dst_lookup_flow(struct sock *sk,
516 struct dst_entry **dst, 528 struct flowi *fl,
517 struct flowi *fl); 529 const struct in6_addr *final_dst,
518extern int ip6_sk_dst_lookup(struct sock *sk, 530 bool can_sleep);
519 struct dst_entry **dst, 531extern struct dst_entry * ip6_sk_dst_lookup_flow(struct sock *sk,
520 struct flowi *fl); 532 struct flowi *fl,
533 const struct in6_addr *final_dst,
534 bool can_sleep);
535extern struct dst_entry * ip6_blackhole_route(struct net *net,
536 struct dst_entry *orig_dst);
521 537
522/* 538/*
523 * skb processing functions 539 * skb processing functions
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 8fcd1691cfb7..2b072fa99399 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -599,9 +599,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
599 * the frame. 599 * the frame.
600 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on 600 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
601 * the frame. 601 * the frame.
602 * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) 602 * @RX_FLAG_MACTIME_MPDU: The timestamp passed in the RX status (@mactime
603 * is valid. This is useful in monitor mode and necessary for beacon frames 603 * field) is valid and contains the time the first symbol of the MPDU
604 * to enable IBSS merging. 604 * was received. This is useful in monitor mode and for proper IBSS
605 * merging.
605 * @RX_FLAG_SHORTPRE: Short preamble was used for this frame 606 * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
606 * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index 607 * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
607 * @RX_FLAG_40MHZ: HT40 (40 MHz) was used 608 * @RX_FLAG_40MHZ: HT40 (40 MHz) was used
@@ -614,7 +615,7 @@ enum mac80211_rx_flags {
614 RX_FLAG_IV_STRIPPED = 1<<4, 615 RX_FLAG_IV_STRIPPED = 1<<4,
615 RX_FLAG_FAILED_FCS_CRC = 1<<5, 616 RX_FLAG_FAILED_FCS_CRC = 1<<5,
616 RX_FLAG_FAILED_PLCP_CRC = 1<<6, 617 RX_FLAG_FAILED_PLCP_CRC = 1<<6,
617 RX_FLAG_TSFT = 1<<7, 618 RX_FLAG_MACTIME_MPDU = 1<<7,
618 RX_FLAG_SHORTPRE = 1<<8, 619 RX_FLAG_SHORTPRE = 1<<8,
619 RX_FLAG_HT = 1<<9, 620 RX_FLAG_HT = 1<<9,
620 RX_FLAG_40MHZ = 1<<10, 621 RX_FLAG_40MHZ = 1<<10,
@@ -1798,9 +1799,14 @@ enum ieee80211_ampdu_mlme_action {
1798 * ieee80211_remain_on_channel_expired(). This callback may sleep. 1799 * ieee80211_remain_on_channel_expired(). This callback may sleep.
1799 * @cancel_remain_on_channel: Requests that an ongoing off-channel period is 1800 * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
1800 * aborted before it expires. This callback may sleep. 1801 * aborted before it expires. This callback may sleep.
1802 * @offchannel_tx: Transmit frame on another channel, wait for a response
1803 * and return. Reliable TX status must be reported for the frame. If the
1804 * return value is 1, then the @remain_on_channel will be used with a
1805 * regular transmission (if supported.)
1806 * @offchannel_tx_cancel_wait: cancel wait associated with offchannel TX
1801 */ 1807 */
1802struct ieee80211_ops { 1808struct ieee80211_ops {
1803 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 1809 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
1804 int (*start)(struct ieee80211_hw *hw); 1810 int (*start)(struct ieee80211_hw *hw);
1805 void (*stop)(struct ieee80211_hw *hw); 1811 void (*stop)(struct ieee80211_hw *hw);
1806 int (*add_interface)(struct ieee80211_hw *hw, 1812 int (*add_interface)(struct ieee80211_hw *hw,
@@ -1877,6 +1883,11 @@ struct ieee80211_ops {
1877 enum nl80211_channel_type channel_type, 1883 enum nl80211_channel_type channel_type,
1878 int duration); 1884 int duration);
1879 int (*cancel_remain_on_channel)(struct ieee80211_hw *hw); 1885 int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
1886 int (*offchannel_tx)(struct ieee80211_hw *hw, struct sk_buff *skb,
1887 struct ieee80211_channel *chan,
1888 enum nl80211_channel_type channel_type,
1889 unsigned int wait);
1890 int (*offchannel_tx_cancel_wait)(struct ieee80211_hw *hw);
1880}; 1891};
1881 1892
1882/** 1893/**
diff --git a/include/net/route.h b/include/net/route.h
index b3f89ad04e0b..9257f5f17337 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -53,16 +53,20 @@ struct fib_info;
53struct rtable { 53struct rtable {
54 struct dst_entry dst; 54 struct dst_entry dst;
55 55
56 /* Cache lookup keys */ 56 /* Lookup key. */
57 struct flowi fl; 57 __be32 rt_key_dst;
58 __be32 rt_key_src;
58 59
59 int rt_genid; 60 int rt_genid;
60 unsigned rt_flags; 61 unsigned rt_flags;
61 __u16 rt_type; 62 __u16 rt_type;
63 __u8 rt_tos;
62 64
63 __be32 rt_dst; /* Path destination */ 65 __be32 rt_dst; /* Path destination */
64 __be32 rt_src; /* Path source */ 66 __be32 rt_src; /* Path source */
65 int rt_iif; 67 int rt_iif;
68 int rt_oif;
69 __u32 rt_mark;
66 70
67 /* Info on neighbour */ 71 /* Info on neighbour */
68 __be32 rt_gateway; 72 __be32 rt_gateway;
@@ -76,12 +80,12 @@ struct rtable {
76 80
77static inline bool rt_is_input_route(struct rtable *rt) 81static inline bool rt_is_input_route(struct rtable *rt)
78{ 82{
79 return rt->fl.iif != 0; 83 return rt->rt_iif != 0;
80} 84}
81 85
82static inline bool rt_is_output_route(struct rtable *rt) 86static inline bool rt_is_output_route(struct rtable *rt)
83{ 87{
84 return rt->fl.iif == 0; 88 return rt->rt_iif == 0;
85} 89}
86 90
87struct ip_rt_acct { 91struct ip_rt_acct {
@@ -118,9 +122,15 @@ extern void ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw,
118 __be32 src, struct net_device *dev); 122 __be32 src, struct net_device *dev);
119extern void rt_cache_flush(struct net *net, int how); 123extern void rt_cache_flush(struct net *net, int how);
120extern void rt_cache_flush_batch(struct net *net); 124extern void rt_cache_flush_batch(struct net *net);
121extern int __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp); 125extern struct rtable *__ip_route_output_key(struct net *, const struct flowi *flp);
122extern int ip_route_output_key(struct net *, struct rtable **, struct flowi *flp); 126extern struct rtable *ip_route_output_flow(struct net *, struct flowi *flp,
123extern int ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags); 127 struct sock *sk);
128extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
129
130static inline struct rtable *ip_route_output_key(struct net *net, struct flowi *flp)
131{
132 return ip_route_output_flow(net, flp, NULL);
133}
124 134
125extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src, 135extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src,
126 u8 tos, struct net_device *devin, bool noref); 136 u8 tos, struct net_device *devin, bool noref);
@@ -165,10 +175,10 @@ static inline char rt_tos2priority(u8 tos)
165 return ip_tos2prio[IPTOS_TOS(tos)>>1]; 175 return ip_tos2prio[IPTOS_TOS(tos)>>1];
166} 176}
167 177
168static inline int ip_route_connect(struct rtable **rp, __be32 dst, 178static inline struct rtable *ip_route_connect(__be32 dst, __be32 src, u32 tos,
169 __be32 src, u32 tos, int oif, u8 protocol, 179 int oif, u8 protocol,
170 __be16 sport, __be16 dport, struct sock *sk, 180 __be16 sport, __be16 dport,
171 int flags) 181 struct sock *sk, bool can_sleep)
172{ 182{
173 struct flowi fl = { .oif = oif, 183 struct flowi fl = { .oif = oif,
174 .mark = sk->sk_mark, 184 .mark = sk->sk_mark,
@@ -178,38 +188,40 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
178 .proto = protocol, 188 .proto = protocol,
179 .fl_ip_sport = sport, 189 .fl_ip_sport = sport,
180 .fl_ip_dport = dport }; 190 .fl_ip_dport = dport };
181 int err;
182 struct net *net = sock_net(sk); 191 struct net *net = sock_net(sk);
192 struct rtable *rt;
183 193
184 if (inet_sk(sk)->transparent) 194 if (inet_sk(sk)->transparent)
185 fl.flags |= FLOWI_FLAG_ANYSRC; 195 fl.flags |= FLOWI_FLAG_ANYSRC;
186 if (protocol == IPPROTO_TCP) 196 if (protocol == IPPROTO_TCP)
187 fl.flags |= FLOWI_FLAG_PRECOW_METRICS; 197 fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
198 if (can_sleep)
199 fl.flags |= FLOWI_FLAG_CAN_SLEEP;
188 200
189 if (!dst || !src) { 201 if (!dst || !src) {
190 err = __ip_route_output_key(net, rp, &fl); 202 rt = __ip_route_output_key(net, &fl);
191 if (err) 203 if (IS_ERR(rt))
192 return err; 204 return rt;
193 fl.fl4_dst = (*rp)->rt_dst; 205 fl.fl4_dst = rt->rt_dst;
194 fl.fl4_src = (*rp)->rt_src; 206 fl.fl4_src = rt->rt_src;
195 ip_rt_put(*rp); 207 ip_rt_put(rt);
196 *rp = NULL;
197 } 208 }
198 security_sk_classify_flow(sk, &fl); 209 security_sk_classify_flow(sk, &fl);
199 return ip_route_output_flow(net, rp, &fl, sk, flags); 210 return ip_route_output_flow(net, &fl, sk);
200} 211}
201 212
202static inline int ip_route_newports(struct rtable **rp, u8 protocol, 213static inline struct rtable *ip_route_newports(struct rtable *rt,
203 __be16 orig_sport, __be16 orig_dport, 214 u8 protocol, __be16 orig_sport,
204 __be16 sport, __be16 dport, struct sock *sk) 215 __be16 orig_dport, __be16 sport,
216 __be16 dport, struct sock *sk)
205{ 217{
206 if (sport != orig_sport || dport != orig_dport) { 218 if (sport != orig_sport || dport != orig_dport) {
207 struct flowi fl = { .oif = (*rp)->fl.oif, 219 struct flowi fl = { .oif = rt->rt_oif,
208 .mark = (*rp)->fl.mark, 220 .mark = rt->rt_mark,
209 .fl4_dst = (*rp)->fl.fl4_dst, 221 .fl4_dst = rt->rt_key_dst,
210 .fl4_src = (*rp)->fl.fl4_src, 222 .fl4_src = rt->rt_key_src,
211 .fl4_tos = (*rp)->fl.fl4_tos, 223 .fl4_tos = rt->rt_tos,
212 .proto = (*rp)->fl.proto, 224 .proto = protocol,
213 .fl_ip_sport = sport, 225 .fl_ip_sport = sport,
214 .fl_ip_dport = dport }; 226 .fl_ip_dport = dport };
215 227
@@ -217,12 +229,11 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
217 fl.flags |= FLOWI_FLAG_ANYSRC; 229 fl.flags |= FLOWI_FLAG_ANYSRC;
218 if (protocol == IPPROTO_TCP) 230 if (protocol == IPPROTO_TCP)
219 fl.flags |= FLOWI_FLAG_PRECOW_METRICS; 231 fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
220 ip_rt_put(*rp); 232 ip_rt_put(rt);
221 *rp = NULL;
222 security_sk_classify_flow(sk, &fl); 233 security_sk_classify_flow(sk, &fl);
223 return ip_route_output_flow(sock_net(sk), rp, &fl, sk, 0); 234 return ip_route_output_flow(sock_net(sk), &fl, sk);
224 } 235 }
225 return 0; 236 return rt;
226} 237}
227 238
228extern void rt_bind_peer(struct rtable *rt, int create); 239extern void rt_bind_peer(struct rtable *rt, int create);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 16626a04cb03..a9505b6a18e3 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -83,6 +83,7 @@ struct Qdisc {
83 struct gnet_stats_queue qstats; 83 struct gnet_stats_queue qstats;
84 struct rcu_head rcu_head; 84 struct rcu_head rcu_head;
85 spinlock_t busylock; 85 spinlock_t busylock;
86 u32 limit;
86}; 87};
87 88
88static inline bool qdisc_is_running(const struct Qdisc *qdisc) 89static inline bool qdisc_is_running(const struct Qdisc *qdisc)
@@ -218,7 +219,7 @@ struct tcf_proto {
218 219
219struct qdisc_skb_cb { 220struct qdisc_skb_cb {
220 unsigned int pkt_len; 221 unsigned int pkt_len;
221 char data[]; 222 long data[];
222}; 223};
223 224
224static inline int qdisc_qlen(struct Qdisc *q) 225static inline int qdisc_qlen(struct Qdisc *q)
diff --git a/include/net/udp.h b/include/net/udp.h
index e82f3a8c0f8f..67ea6fcb3ec0 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -144,6 +144,17 @@ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
144 return csum; 144 return csum;
145} 145}
146 146
147static inline __wsum udp_csum(struct sk_buff *skb)
148{
149 __wsum csum = csum_partial(skb_transport_header(skb),
150 sizeof(struct udphdr), skb->csum);
151
152 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
153 csum = csum_add(csum, skb->csum);
154 }
155 return csum;
156}
157
147/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ 158/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
148static inline void udp_lib_hash(struct sock *sk) 159static inline void udp_lib_hash(struct sock *sk)
149{ 160{
diff --git a/include/net/udplite.h b/include/net/udplite.h
index afdffe607b24..673a024c6b2a 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -115,6 +115,18 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
115 return csum; 115 return csum;
116} 116}
117 117
118static inline __wsum udplite_csum(struct sk_buff *skb)
119{
120 struct sock *sk = skb->sk;
121 int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
122 const int off = skb_transport_offset(skb);
123 const int len = skb->len - off;
124
125 skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
126
127 return skb_checksum(skb, off, min(cscov, len), 0);
128}
129
118extern void udplite4_register(void); 130extern void udplite4_register(void);
119extern int udplite_get_port(struct sock *sk, unsigned short snum, 131extern int udplite_get_port(struct sock *sk, unsigned short snum,
120 int (*scmp)(const struct sock *, const struct sock *)); 132 int (*scmp)(const struct sock *, const struct sock *));
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index efded23dc4ae..d5dcf3974636 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -280,6 +280,7 @@ struct xfrm_policy_afinfo {
280 int (*fill_dst)(struct xfrm_dst *xdst, 280 int (*fill_dst)(struct xfrm_dst *xdst,
281 struct net_device *dev, 281 struct net_device *dev,
282 const struct flowi *fl); 282 const struct flowi *fl);
283 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
283}; 284};
284 285
285extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); 286extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 8479b66c067b..3fd5064dd43a 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev);
261#define CONF_ENABLE_ESR 0x0008 261#define CONF_ENABLE_ESR 0x0008
262#define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ 262#define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ
263 * (CONF_ENABLE_IRQ) in use */ 263 * (CONF_ENABLE_IRQ) in use */
264#define CONF_ENABLE_ZVCARD 0x0020
264 265
265/* flags used by pcmcia_loop_config() autoconfiguration */ 266/* flags used by pcmcia_loop_config() autoconfiguration */
266#define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */ 267#define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */
diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h
index b4a0db2307ef..1eeebd534f7e 100644
--- a/include/sound/wm8903.h
+++ b/include/sound/wm8903.h
@@ -17,13 +17,9 @@
17/* 17/*
18 * R6 (0x06) - Mic Bias Control 0 18 * R6 (0x06) - Mic Bias Control 0
19 */ 19 */
20#define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */ 20#define WM8903_MICDET_THR_MASK 0x0030 /* MICDET_THR - [5:4] */
21#define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */ 21#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [5:4] */
22#define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */ 22#define WM8903_MICDET_THR_WIDTH 2 /* MICDET_THR - [5:4] */
23#define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */
24#define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
25#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
26#define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
27#define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ 23#define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
28#define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ 24#define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
29#define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ 25#define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index aba421d68f6f..78f18adb49c8 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
31 0 : blk_rq_sectors(rq); 31 0 : blk_rq_sectors(rq);
32 __entry->errors = rq->errors; 32 __entry->errors = rq->errors;
33 33
34 blk_fill_rwbs_rq(__entry->rwbs, rq); 34 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
35 blk_dump_cmd(__get_str(cmd), rq); 35 blk_dump_cmd(__get_str(cmd), rq);
36 ), 36 ),
37 37
@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq,
118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
119 blk_rq_bytes(rq) : 0; 119 blk_rq_bytes(rq) : 0;
120 120
121 blk_fill_rwbs_rq(__entry->rwbs, rq); 121 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
122 blk_dump_cmd(__get_str(cmd), rq); 122 blk_dump_cmd(__get_str(cmd), rq);
123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
124 ), 124 ),
@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap,
563 __entry->nr_sector = blk_rq_sectors(rq); 563 __entry->nr_sector = blk_rq_sectors(rq);
564 __entry->old_dev = dev; 564 __entry->old_dev = dev;
565 __entry->old_sector = from; 565 __entry->old_sector = from;
566 blk_fill_rwbs_rq(__entry->rwbs, rq); 566 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
567 ), 567 ),
568 568
569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", 569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/kernel/audit.c b/kernel/audit.c
index 162e88e33bc9..939500317066 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -673,9 +673,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
673 673
674 pid = NETLINK_CREDS(skb)->pid; 674 pid = NETLINK_CREDS(skb)->pid;
675 uid = NETLINK_CREDS(skb)->uid; 675 uid = NETLINK_CREDS(skb)->uid;
676 loginuid = NETLINK_CB(skb).loginuid; 676 loginuid = audit_get_loginuid(current);
677 sessionid = NETLINK_CB(skb).sessionid; 677 sessionid = audit_get_sessionid(current);
678 sid = NETLINK_CB(skb).sid; 678 security_task_getsecid(current, &sid);
679 seq = nlh->nlmsg_seq; 679 seq = nlh->nlmsg_seq;
680 data = NLMSG_DATA(nlh); 680 data = NLMSG_DATA(nlh);
681 681
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index add2819af71b..f8277c80d678 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1238,6 +1238,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
1238 for (i = 0; i < rule->field_count; i++) { 1238 for (i = 0; i < rule->field_count; i++) {
1239 struct audit_field *f = &rule->fields[i]; 1239 struct audit_field *f = &rule->fields[i];
1240 int result = 0; 1240 int result = 0;
1241 u32 sid;
1241 1242
1242 switch (f->type) { 1243 switch (f->type) {
1243 case AUDIT_PID: 1244 case AUDIT_PID:
@@ -1250,19 +1251,22 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
1250 result = audit_comparator(cb->creds.gid, f->op, f->val); 1251 result = audit_comparator(cb->creds.gid, f->op, f->val);
1251 break; 1252 break;
1252 case AUDIT_LOGINUID: 1253 case AUDIT_LOGINUID:
1253 result = audit_comparator(cb->loginuid, f->op, f->val); 1254 result = audit_comparator(audit_get_loginuid(current),
1255 f->op, f->val);
1254 break; 1256 break;
1255 case AUDIT_SUBJ_USER: 1257 case AUDIT_SUBJ_USER:
1256 case AUDIT_SUBJ_ROLE: 1258 case AUDIT_SUBJ_ROLE:
1257 case AUDIT_SUBJ_TYPE: 1259 case AUDIT_SUBJ_TYPE:
1258 case AUDIT_SUBJ_SEN: 1260 case AUDIT_SUBJ_SEN:
1259 case AUDIT_SUBJ_CLR: 1261 case AUDIT_SUBJ_CLR:
1260 if (f->lsm_rule) 1262 if (f->lsm_rule) {
1261 result = security_audit_rule_match(cb->sid, 1263 security_task_getsecid(current, &sid);
1264 result = security_audit_rule_match(sid,
1262 f->type, 1265 f->type,
1263 f->op, 1266 f->op,
1264 f->lsm_rule, 1267 f->lsm_rule,
1265 NULL); 1268 NULL);
1269 }
1266 break; 1270 break;
1267 } 1271 }
1268 1272
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4571ae7e085a..99c3bc8a6fb4 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -3,6 +3,12 @@
3 */ 3 */
4#include <linux/irqdesc.h> 4#include <linux/irqdesc.h>
5 5
6#ifdef CONFIG_SPARSE_IRQ
7# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
8#else
9# define IRQ_BITMAP_BITS NR_IRQS
10#endif
11
6extern int noirqdebug; 12extern int noirqdebug;
7 13
8#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) 14#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 282f20230e67..2039bea31bdf 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS;
94EXPORT_SYMBOL_GPL(nr_irqs); 94EXPORT_SYMBOL_GPL(nr_irqs);
95 95
96static DEFINE_MUTEX(sparse_irq_lock); 96static DEFINE_MUTEX(sparse_irq_lock);
97static DECLARE_BITMAP(allocated_irqs, NR_IRQS); 97static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
98 98
99#ifdef CONFIG_SPARSE_IRQ 99#ifdef CONFIG_SPARSE_IRQ
100 100
@@ -217,6 +217,15 @@ int __init early_irq_init(void)
217 initcnt = arch_probe_nr_irqs(); 217 initcnt = arch_probe_nr_irqs();
218 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); 218 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
219 219
220 if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
221 nr_irqs = IRQ_BITMAP_BITS;
222
223 if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
224 initcnt = IRQ_BITMAP_BITS;
225
226 if (initcnt > nr_irqs)
227 nr_irqs = initcnt;
228
220 for (i = 0; i < initcnt; i++) { 229 for (i = 0; i < initcnt; i++) {
221 desc = alloc_desc(i, node); 230 desc = alloc_desc(i, node);
222 set_bit(i, allocated_irqs); 231 set_bit(i, allocated_irqs);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0587c5ceaed8..094fafe86c96 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1182,7 +1182,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1182 if (retval) 1182 if (retval)
1183 kfree(action); 1183 kfree(action);
1184 1184
1185#ifdef CONFIG_DEBUG_SHIRQ 1185#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1186 if (!retval && (irqflags & IRQF_SHARED)) { 1186 if (!retval && (irqflags & IRQF_SHARED)) {
1187 /* 1187 /*
1188 * It's a shared IRQ -- the driver ought to be prepared for it 1188 * It's a shared IRQ -- the driver ought to be prepared for it
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 891115a929aa..dc49358b73fa 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
23#ifdef CONFIG_HARDIRQS_SW_RESEND 23#ifdef CONFIG_HARDIRQS_SW_RESEND
24 24
25/* Bitmap to handle software resend of interrupts: */ 25/* Bitmap to handle software resend of interrupts: */
26static DECLARE_BITMAP(irqs_resend, NR_IRQS); 26static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
27 27
28/* 28/*
29 * Run software resends of IRQ's 29 * Run software resends of IRQ's
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
782 raw_spin_unlock_irq(&ctx->lock); 782 raw_spin_unlock_irq(&ctx->lock);
783} 783}
784 784
785#define MAX_INTERRUPTS (~0ULL)
786
787static void perf_log_throttle(struct perf_event *event, int enable);
788
785static int 789static int
786event_sched_in(struct perf_event *event, 790event_sched_in(struct perf_event *event,
787 struct perf_cpu_context *cpuctx, 791 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
794 798
795 event->state = PERF_EVENT_STATE_ACTIVE; 799 event->state = PERF_EVENT_STATE_ACTIVE;
796 event->oncpu = smp_processor_id(); 800 event->oncpu = smp_processor_id();
801
802 /*
803 * Unthrottle events, since we scheduled we might have missed several
804 * ticks already, also for a heavily scheduling task there is little
805 * guarantee it'll get a tick in a timely manner.
806 */
807 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
808 perf_log_throttle(event, 1);
809 event->hw.interrupts = 0;
810 }
811
797 /* 812 /*
798 * The new state must be visible before we turn it on in the hardware: 813 * The new state must be visible before we turn it on in the hardware:
799 */ 814 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
1596 } 1611 }
1597} 1612}
1598 1613
1599#define MAX_INTERRUPTS (~0ULL)
1600
1601static void perf_log_throttle(struct perf_event *event, int enable);
1602
1603static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 1614static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1604{ 1615{
1605 u64 frequency = event->attr.sample_freq; 1616 u64 frequency = event->attr.sample_freq;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761b5668..a3b5aff62606 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
600 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; 600 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
601} 601}
602 602
603/*
604 * Check whether the broadcast device supports oneshot.
605 */
606bool tick_broadcast_oneshot_available(void)
607{
608 struct clock_event_device *bc = tick_broadcast_device.evtdev;
609
610 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
611}
612
603#endif 613#endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 051bc80a0c43..ed228ef6f6b8 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
51{ 51{
52 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); 52 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
53 53
54 return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); 54 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
55 return 0;
56 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
57 return 1;
58 return tick_broadcast_oneshot_available();
55} 59}
56 60
57/* 61/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 290eefbc1f60..f65d3a723a64 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
36extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); 36extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
37extern int tick_broadcast_oneshot_active(void); 37extern int tick_broadcast_oneshot_active(void);
38extern void tick_check_oneshot_broadcast(int cpu); 38extern void tick_check_oneshot_broadcast(int cpu);
39bool tick_broadcast_oneshot_available(void);
39# else /* BROADCAST */ 40# else /* BROADCAST */
40static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 41static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
41{ 42{
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
46static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } 47static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
47static inline int tick_broadcast_oneshot_active(void) { return 0; } 48static inline int tick_broadcast_oneshot_active(void) { return 0; }
48static inline void tick_check_oneshot_broadcast(int cpu) { } 49static inline void tick_check_oneshot_broadcast(int cpu) { }
50static inline bool tick_broadcast_oneshot_available(void) { return true; }
49# endif /* !BROADCAST */ 51# endif /* !BROADCAST */
50 52
51#else /* !ONESHOT */ 53#else /* !ONESHOT */
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
76 return 0; 78 return 0;
77} 79}
78static inline int tick_broadcast_oneshot_active(void) { return 0; } 80static inline int tick_broadcast_oneshot_active(void) { return 0; }
81static inline bool tick_broadcast_oneshot_available(void) { return false; }
79#endif /* !TICK_ONESHOT */ 82#endif /* !TICK_ONESHOT */
80 83
81/* 84/*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d95721f33702..cbafed7d4f38 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1827 rwbs[i] = '\0'; 1827 rwbs[i] = '\0';
1828} 1828}
1829 1829
1830void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
1831{
1832 int rw = rq->cmd_flags & 0x03;
1833 int bytes;
1834
1835 if (rq->cmd_flags & REQ_DISCARD)
1836 rw |= REQ_DISCARD;
1837
1838 if (rq->cmd_flags & REQ_SECURE)
1839 rw |= REQ_SECURE;
1840
1841 bytes = blk_rq_bytes(rq);
1842
1843 blk_fill_rwbs(rwbs, rw, bytes);
1844}
1845
1846#endif /* CONFIG_EVENT_TRACING */ 1830#endif /* CONFIG_EVENT_TRACING */
1847 1831
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc34411..ac09f2226dc7 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
148{ 148{
149 int i, len = 0; 149 int i, len = 0;
150 150
151 for (i = 0; i < n; i++) { 151 for (i = 0; i < n; i++, p++) {
152 if (p->len) 152 if (p->len)
153 len += nla_total_size(p->len); 153 len += nla_total_size(p->len);
154 else if (nla_attr_minlen[p->type]) 154 else if (nla_attr_minlen[p->type])
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b804..93ca08b8a451 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
686 /* 686 /*
687 * Ensure that the address returned is DMA'ble 687 * Ensure that the address returned is DMA'ble
688 */ 688 */
689 if (!dma_capable(dev, dev_addr, size)) 689 if (!dma_capable(dev, dev_addr, size)) {
690 panic("map_single: bounce buffer is not DMA'ble"); 690 swiotlb_tbl_unmap_single(dev, map, size, dir);
691 dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
692 }
691 693
692 return dev_addr; 694 return dev_addr;
693} 695}
diff --git a/mm/memory.c b/mm/memory.c
index 8e8c18324863..5823698c2b71 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2648,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
2648 details.last_index = ULONG_MAX; 2648 details.last_index = ULONG_MAX;
2649 details.i_mmap_lock = &mapping->i_mmap_lock; 2649 details.i_mmap_lock = &mapping->i_mmap_lock;
2650 2650
2651 mutex_lock(&mapping->unmap_mutex);
2651 spin_lock(&mapping->i_mmap_lock); 2652 spin_lock(&mapping->i_mmap_lock);
2652 2653
2653 /* Protect against endless unmapping loops */ 2654 /* Protect against endless unmapping loops */
@@ -2664,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
2664 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2665 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2665 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2666 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2666 spin_unlock(&mapping->i_mmap_lock); 2667 spin_unlock(&mapping->i_mmap_lock);
2668 mutex_unlock(&mapping->unmap_mutex);
2667} 2669}
2668EXPORT_SYMBOL(unmap_mapping_range); 2670EXPORT_SYMBOL(unmap_mapping_range);
2669 2671
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 368fc9d23610..49355a970be2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1830,7 +1830,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1830 if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 1830 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1831 unsigned nid; 1831 unsigned nid;
1832 1832
1833 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); 1833 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1834 mpol_cond_put(pol); 1834 mpol_cond_put(pol);
1835 page = alloc_page_interleave(gfp, order, nid); 1835 page = alloc_page_interleave(gfp, order, nid);
1836 put_mems_allowed(); 1836 put_mems_allowed();
diff --git a/mm/migrate.c b/mm/migrate.c
index 766115253807..352de555626c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1287 return -EPERM; 1287 return -EPERM;
1288 1288
1289 /* Find the mm_struct */ 1289 /* Find the mm_struct */
1290 read_lock(&tasklist_lock); 1290 rcu_read_lock();
1291 task = pid ? find_task_by_vpid(pid) : current; 1291 task = pid ? find_task_by_vpid(pid) : current;
1292 if (!task) { 1292 if (!task) {
1293 read_unlock(&tasklist_lock); 1293 rcu_read_unlock();
1294 return -ESRCH; 1294 return -ESRCH;
1295 } 1295 }
1296 mm = get_task_mm(task); 1296 mm = get_task_mm(task);
1297 read_unlock(&tasklist_lock); 1297 rcu_read_unlock();
1298 1298
1299 if (!mm) 1299 if (!mm)
1300 return -EINVAL; 1300 return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index 9925b6391b80..1de98d492ddc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
94 */ 94 */
95 mapping = vma->vm_file->f_mapping; 95 mapping = vma->vm_file->f_mapping;
96 spin_lock(&mapping->i_mmap_lock); 96 spin_lock(&mapping->i_mmap_lock);
97 if (new_vma->vm_truncate_count && 97 new_vma->vm_truncate_count = 0;
98 new_vma->vm_truncate_count != vma->vm_truncate_count)
99 new_vma->vm_truncate_count = 0;
100 } 98 }
101 99
102 /* 100 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a873e61e312e..cdef1d4b4e47 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5376,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
5376 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { 5376 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5377 unsigned long check = pfn + iter; 5377 unsigned long check = pfn + iter;
5378 5378
5379 if (!pfn_valid_within(check)) { 5379 if (!pfn_valid_within(check))
5380 iter++;
5381 continue; 5380 continue;
5382 } 5381
5383 page = pfn_to_page(check); 5382 page = pfn_to_page(check);
5384 if (!page_count(page)) { 5383 if (!page_count(page)) {
5385 if (PageBuddy(page)) 5384 if (PageBuddy(page))
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 07a458d72fa8..0341c5700e34 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1940 1940
1941 error = -EINVAL; 1941 error = -EINVAL;
1942 if (S_ISBLK(inode->i_mode)) { 1942 if (S_ISBLK(inode->i_mode)) {
1943 bdev = I_BDEV(inode); 1943 bdev = bdgrab(I_BDEV(inode));
1944 error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, 1944 error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1945 sys_swapon); 1945 sys_swapon);
1946 if (error < 0) { 1946 if (error < 0) {
diff --git a/mm/truncate.c b/mm/truncate.c
index 49feb46e77b8..d64296be00d3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
225 next = start; 225 next = start;
226 while (next <= end && 226 while (next <= end &&
227 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 227 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
228 mem_cgroup_uncharge_start();
228 for (i = 0; i < pagevec_count(&pvec); i++) { 229 for (i = 0; i < pagevec_count(&pvec); i++) {
229 struct page *page = pvec.pages[i]; 230 struct page *page = pvec.pages[i];
230 pgoff_t page_index = page->index; 231 pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
247 unlock_page(page); 248 unlock_page(page);
248 } 249 }
249 pagevec_release(&pvec); 250 pagevec_release(&pvec);
251 mem_cgroup_uncharge_end();
250 cond_resched(); 252 cond_resched();
251 } 253 }
252 254
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17497d0cd8b9..6771ea70bfe7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
1841 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 1841 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
1842 return false; 1842 return false;
1843 1843
1844 /* 1844 /* Consider stopping depending on scan and reclaim activity */
1845 * If we failed to reclaim and have scanned the full list, stop. 1845 if (sc->gfp_mask & __GFP_REPEAT) {
1846 * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far 1846 /*
1847 * faster but obviously would be less likely to succeed 1847 * For __GFP_REPEAT allocations, stop reclaiming if the
1848 * allocation. If this is desirable, use GFP_REPEAT to decide 1848 * full LRU list has been scanned and we are still failing
1849 * if both reclaimed and scanned should be checked or just 1849 * to reclaim pages. This full LRU scan is potentially
1850 * reclaimed 1850 * expensive but a __GFP_REPEAT caller really wants to succeed
1851 */ 1851 */
1852 if (!nr_reclaimed && !nr_scanned) 1852 if (!nr_reclaimed && !nr_scanned)
1853 return false; 1853 return false;
1854 } else {
1855 /*
1856 * For non-__GFP_REPEAT allocations which can presumably
1857 * fail without consequence, stop if we failed to reclaim
1858 * any pages from the last SWAP_CLUSTER_MAX number of
1859 * pages that were scanned. This will return to the
1860 * caller faster at the risk reclaim/compaction and
1861 * the resulting allocation attempt fails
1862 */
1863 if (!nr_reclaimed)
1864 return false;
1865 }
1854 1866
1855 /* 1867 /*
1856 * If we have not reclaimed enough pages for compaction and the 1868 * If we have not reclaimed enough pages for compaction and the
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index be737539f34d..ae610f046de5 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -625,6 +625,19 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
625 rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); 625 rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
626 return rc; 626 return rc;
627} 627}
628
629static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
630 struct scatterlist *sgl, unsigned int sgc)
631{
632 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
633 const struct net_device_ops *ops = real_dev->netdev_ops;
634 int rc = 0;
635
636 if (ops->ndo_fcoe_ddp_target)
637 rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
638
639 return rc;
640}
628#endif 641#endif
629 642
630static void vlan_dev_change_rx_flags(struct net_device *dev, int change) 643static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -858,6 +871,7 @@ static const struct net_device_ops vlan_netdev_ops = {
858 .ndo_fcoe_enable = vlan_dev_fcoe_enable, 871 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
859 .ndo_fcoe_disable = vlan_dev_fcoe_disable, 872 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
860 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, 873 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
874 .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
861#endif 875#endif
862}; 876};
863 877
diff --git a/net/atm/clip.c b/net/atm/clip.c
index d257da50fcfb..810a1294eddb 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -520,9 +520,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
520 unlink_clip_vcc(clip_vcc); 520 unlink_clip_vcc(clip_vcc);
521 return 0; 521 return 0;
522 } 522 }
523 error = ip_route_output_key(&init_net, &rt, &fl); 523 rt = ip_route_output_key(&init_net, &fl);
524 if (error) 524 if (IS_ERR(rt))
525 return error; 525 return PTR_ERR(rt);
526 neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1); 526 neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
527 ip_rt_put(rt); 527 ip_rt_put(rt);
528 if (!neigh) 528 if (!neigh)
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 1997725a243b..af45d6b2031f 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -35,7 +35,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
35 int packet_len, 35 int packet_len,
36 unsigned long send_time, 36 unsigned long send_time,
37 bool directlink, 37 bool directlink,
38 struct batman_if *if_incoming, 38 struct hard_iface *if_incoming,
39 struct forw_packet *forw_packet) 39 struct forw_packet *forw_packet)
40{ 40{
41 struct batman_packet *batman_packet = 41 struct batman_packet *batman_packet =
@@ -99,7 +99,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
99/* create a new aggregated packet and add this packet to it */ 99/* create a new aggregated packet and add this packet to it */
100static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, 100static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
101 unsigned long send_time, bool direct_link, 101 unsigned long send_time, bool direct_link,
102 struct batman_if *if_incoming, 102 struct hard_iface *if_incoming,
103 int own_packet) 103 int own_packet)
104{ 104{
105 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 105 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -188,7 +188,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
188 188
189void add_bat_packet_to_list(struct bat_priv *bat_priv, 189void add_bat_packet_to_list(struct bat_priv *bat_priv,
190 unsigned char *packet_buff, int packet_len, 190 unsigned char *packet_buff, int packet_len,
191 struct batman_if *if_incoming, char own_packet, 191 struct hard_iface *if_incoming, char own_packet,
192 unsigned long send_time) 192 unsigned long send_time)
193{ 193{
194 /** 194 /**
@@ -247,7 +247,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
247 247
248/* unpack the aggregated packets and process them one by one */ 248/* unpack the aggregated packets and process them one by one */
249void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, 249void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
250 int packet_len, struct batman_if *if_incoming) 250 int packet_len, struct hard_iface *if_incoming)
251{ 251{
252 struct batman_packet *batman_packet; 252 struct batman_packet *batman_packet;
253 int buff_pos = 0; 253 int buff_pos = 0;
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 6ce305b40017..062204289d1f 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -35,9 +35,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
35 35
36void add_bat_packet_to_list(struct bat_priv *bat_priv, 36void add_bat_packet_to_list(struct bat_priv *bat_priv,
37 unsigned char *packet_buff, int packet_len, 37 unsigned char *packet_buff, int packet_len,
38 struct batman_if *if_incoming, char own_packet, 38 struct hard_iface *if_incoming, char own_packet,
39 unsigned long send_time); 39 unsigned long send_time);
40void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, 40void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
41 int packet_len, struct batman_if *if_incoming); 41 int packet_len, struct hard_iface *if_incoming);
42 42
43#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */ 43#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index f7b93a0805fe..e449bf6353e0 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -441,16 +441,16 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
441 char *buff) 441 char *buff)
442{ 442{
443 struct net_device *net_dev = kobj_to_netdev(kobj); 443 struct net_device *net_dev = kobj_to_netdev(kobj);
444 struct batman_if *batman_if = get_batman_if_by_netdev(net_dev); 444 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
445 ssize_t length; 445 ssize_t length;
446 446
447 if (!batman_if) 447 if (!hard_iface)
448 return 0; 448 return 0;
449 449
450 length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ? 450 length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
451 "none" : batman_if->soft_iface->name); 451 "none" : hard_iface->soft_iface->name);
452 452
453 kref_put(&batman_if->refcount, hardif_free_ref); 453 hardif_free_ref(hard_iface);
454 454
455 return length; 455 return length;
456} 456}
@@ -459,11 +459,11 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
459 char *buff, size_t count) 459 char *buff, size_t count)
460{ 460{
461 struct net_device *net_dev = kobj_to_netdev(kobj); 461 struct net_device *net_dev = kobj_to_netdev(kobj);
462 struct batman_if *batman_if = get_batman_if_by_netdev(net_dev); 462 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
463 int status_tmp = -1; 463 int status_tmp = -1;
464 int ret; 464 int ret = count;
465 465
466 if (!batman_if) 466 if (!hard_iface)
467 return count; 467 return count;
468 468
469 if (buff[count - 1] == '\n') 469 if (buff[count - 1] == '\n')
@@ -472,7 +472,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
472 if (strlen(buff) >= IFNAMSIZ) { 472 if (strlen(buff) >= IFNAMSIZ) {
473 pr_err("Invalid parameter for 'mesh_iface' setting received: " 473 pr_err("Invalid parameter for 'mesh_iface' setting received: "
474 "interface name too long '%s'\n", buff); 474 "interface name too long '%s'\n", buff);
475 kref_put(&batman_if->refcount, hardif_free_ref); 475 hardif_free_ref(hard_iface);
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
@@ -481,30 +481,31 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
481 else 481 else
482 status_tmp = IF_I_WANT_YOU; 482 status_tmp = IF_I_WANT_YOU;
483 483
484 if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) && 484 if (hard_iface->if_status == status_tmp)
485 (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) { 485 goto out;
486 kref_put(&batman_if->refcount, hardif_free_ref); 486
487 return count; 487 if ((hard_iface->soft_iface) &&
488 } 488 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
489 goto out;
489 490
490 if (status_tmp == IF_NOT_IN_USE) { 491 if (status_tmp == IF_NOT_IN_USE) {
491 rtnl_lock(); 492 rtnl_lock();
492 hardif_disable_interface(batman_if); 493 hardif_disable_interface(hard_iface);
493 rtnl_unlock(); 494 rtnl_unlock();
494 kref_put(&batman_if->refcount, hardif_free_ref); 495 goto out;
495 return count;
496 } 496 }
497 497
498 /* if the interface already is in use */ 498 /* if the interface already is in use */
499 if (batman_if->if_status != IF_NOT_IN_USE) { 499 if (hard_iface->if_status != IF_NOT_IN_USE) {
500 rtnl_lock(); 500 rtnl_lock();
501 hardif_disable_interface(batman_if); 501 hardif_disable_interface(hard_iface);
502 rtnl_unlock(); 502 rtnl_unlock();
503 } 503 }
504 504
505 ret = hardif_enable_interface(batman_if, buff); 505 ret = hardif_enable_interface(hard_iface, buff);
506 kref_put(&batman_if->refcount, hardif_free_ref);
507 506
507out:
508 hardif_free_ref(hard_iface);
508 return ret; 509 return ret;
509} 510}
510 511
@@ -512,13 +513,13 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
512 char *buff) 513 char *buff)
513{ 514{
514 struct net_device *net_dev = kobj_to_netdev(kobj); 515 struct net_device *net_dev = kobj_to_netdev(kobj);
515 struct batman_if *batman_if = get_batman_if_by_netdev(net_dev); 516 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
516 ssize_t length; 517 ssize_t length;
517 518
518 if (!batman_if) 519 if (!hard_iface)
519 return 0; 520 return 0;
520 521
521 switch (batman_if->if_status) { 522 switch (hard_iface->if_status) {
522 case IF_TO_BE_REMOVED: 523 case IF_TO_BE_REMOVED:
523 length = sprintf(buff, "disabling\n"); 524 length = sprintf(buff, "disabling\n");
524 break; 525 break;
@@ -537,7 +538,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
537 break; 538 break;
538 } 539 }
539 540
540 kref_put(&batman_if->refcount, hardif_free_ref); 541 hardif_free_ref(hard_iface);
541 542
542 return length; 543 return length;
543} 544}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 429a013d2e0a..3cc43558cf9c 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -28,58 +28,75 @@
28#include <linux/udp.h> 28#include <linux/udp.h>
29#include <linux/if_vlan.h> 29#include <linux/if_vlan.h>
30 30
31static void gw_node_free_ref(struct kref *refcount) 31static void gw_node_free_rcu(struct rcu_head *rcu)
32{ 32{
33 struct gw_node *gw_node; 33 struct gw_node *gw_node;
34 34
35 gw_node = container_of(refcount, struct gw_node, refcount); 35 gw_node = container_of(rcu, struct gw_node, rcu);
36 kfree(gw_node); 36 kfree(gw_node);
37} 37}
38 38
39static void gw_node_free_rcu(struct rcu_head *rcu) 39static void gw_node_free_ref(struct gw_node *gw_node)
40{ 40{
41 struct gw_node *gw_node; 41 if (atomic_dec_and_test(&gw_node->refcount))
42 42 call_rcu(&gw_node->rcu, gw_node_free_rcu);
43 gw_node = container_of(rcu, struct gw_node, rcu);
44 kref_put(&gw_node->refcount, gw_node_free_ref);
45} 43}
46 44
47void *gw_get_selected(struct bat_priv *bat_priv) 45void *gw_get_selected(struct bat_priv *bat_priv)
48{ 46{
49 struct gw_node *curr_gateway_tmp = bat_priv->curr_gw; 47 struct gw_node *curr_gateway_tmp;
48 struct orig_node *orig_node = NULL;
50 49
50 rcu_read_lock();
51 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
51 if (!curr_gateway_tmp) 52 if (!curr_gateway_tmp)
52 return NULL; 53 goto out;
54
55 orig_node = curr_gateway_tmp->orig_node;
56 if (!orig_node)
57 goto out;
53 58
54 return curr_gateway_tmp->orig_node; 59 if (!atomic_inc_not_zero(&orig_node->refcount))
60 orig_node = NULL;
61
62out:
63 rcu_read_unlock();
64 return orig_node;
55} 65}
56 66
57void gw_deselect(struct bat_priv *bat_priv) 67void gw_deselect(struct bat_priv *bat_priv)
58{ 68{
59 struct gw_node *gw_node = bat_priv->curr_gw; 69 struct gw_node *gw_node;
60 70
61 bat_priv->curr_gw = NULL; 71 spin_lock_bh(&bat_priv->gw_list_lock);
72 gw_node = rcu_dereference(bat_priv->curr_gw);
73 rcu_assign_pointer(bat_priv->curr_gw, NULL);
74 spin_unlock_bh(&bat_priv->gw_list_lock);
62 75
63 if (gw_node) 76 if (gw_node)
64 kref_put(&gw_node->refcount, gw_node_free_ref); 77 gw_node_free_ref(gw_node);
65} 78}
66 79
67static struct gw_node *gw_select(struct bat_priv *bat_priv, 80static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
68 struct gw_node *new_gw_node)
69{ 81{
70 struct gw_node *curr_gw_node = bat_priv->curr_gw; 82 struct gw_node *curr_gw_node;
71 83
72 if (new_gw_node) 84 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
73 kref_get(&new_gw_node->refcount); 85 new_gw_node = NULL;
86
87 spin_lock_bh(&bat_priv->gw_list_lock);
88 curr_gw_node = rcu_dereference(bat_priv->curr_gw);
89 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
90 spin_unlock_bh(&bat_priv->gw_list_lock);
74 91
75 bat_priv->curr_gw = new_gw_node; 92 if (curr_gw_node)
76 return curr_gw_node; 93 gw_node_free_ref(curr_gw_node);
77} 94}
78 95
79void gw_election(struct bat_priv *bat_priv) 96void gw_election(struct bat_priv *bat_priv)
80{ 97{
81 struct hlist_node *node; 98 struct hlist_node *node;
82 struct gw_node *gw_node, *curr_gw_tmp = NULL, *old_gw_node = NULL; 99 struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
83 uint8_t max_tq = 0; 100 uint8_t max_tq = 0;
84 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 101 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
85 int down, up; 102 int down, up;
@@ -93,19 +110,23 @@ void gw_election(struct bat_priv *bat_priv)
93 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) 110 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
94 return; 111 return;
95 112
96 if (bat_priv->curr_gw) 113 rcu_read_lock();
114 curr_gw = rcu_dereference(bat_priv->curr_gw);
115 if (curr_gw) {
116 rcu_read_unlock();
97 return; 117 return;
118 }
98 119
99 rcu_read_lock();
100 if (hlist_empty(&bat_priv->gw_list)) { 120 if (hlist_empty(&bat_priv->gw_list)) {
101 rcu_read_unlock();
102 121
103 if (bat_priv->curr_gw) { 122 if (curr_gw) {
123 rcu_read_unlock();
104 bat_dbg(DBG_BATMAN, bat_priv, 124 bat_dbg(DBG_BATMAN, bat_priv,
105 "Removing selected gateway - " 125 "Removing selected gateway - "
106 "no gateway in range\n"); 126 "no gateway in range\n");
107 gw_deselect(bat_priv); 127 gw_deselect(bat_priv);
108 } 128 } else
129 rcu_read_unlock();
109 130
110 return; 131 return;
111 } 132 }
@@ -154,12 +175,12 @@ void gw_election(struct bat_priv *bat_priv)
154 max_gw_factor = tmp_gw_factor; 175 max_gw_factor = tmp_gw_factor;
155 } 176 }
156 177
157 if (bat_priv->curr_gw != curr_gw_tmp) { 178 if (curr_gw != curr_gw_tmp) {
158 if ((bat_priv->curr_gw) && (!curr_gw_tmp)) 179 if ((curr_gw) && (!curr_gw_tmp))
159 bat_dbg(DBG_BATMAN, bat_priv, 180 bat_dbg(DBG_BATMAN, bat_priv,
160 "Removing selected gateway - " 181 "Removing selected gateway - "
161 "no gateway in range\n"); 182 "no gateway in range\n");
162 else if ((!bat_priv->curr_gw) && (curr_gw_tmp)) 183 else if ((!curr_gw) && (curr_gw_tmp))
163 bat_dbg(DBG_BATMAN, bat_priv, 184 bat_dbg(DBG_BATMAN, bat_priv,
164 "Adding route to gateway %pM " 185 "Adding route to gateway %pM "
165 "(gw_flags: %i, tq: %i)\n", 186 "(gw_flags: %i, tq: %i)\n",
@@ -174,43 +195,43 @@ void gw_election(struct bat_priv *bat_priv)
174 curr_gw_tmp->orig_node->gw_flags, 195 curr_gw_tmp->orig_node->gw_flags,
175 curr_gw_tmp->orig_node->router->tq_avg); 196 curr_gw_tmp->orig_node->router->tq_avg);
176 197
177 old_gw_node = gw_select(bat_priv, curr_gw_tmp); 198 gw_select(bat_priv, curr_gw_tmp);
178 } 199 }
179 200
180 rcu_read_unlock(); 201 rcu_read_unlock();
181
182 /* the kfree() has to be outside of the rcu lock */
183 if (old_gw_node)
184 kref_put(&old_gw_node->refcount, gw_node_free_ref);
185} 202}
186 203
187void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 204void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
188{ 205{
189 struct gw_node *curr_gateway_tmp = bat_priv->curr_gw; 206 struct gw_node *curr_gateway_tmp;
190 uint8_t gw_tq_avg, orig_tq_avg; 207 uint8_t gw_tq_avg, orig_tq_avg;
191 208
209 rcu_read_lock();
210 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
192 if (!curr_gateway_tmp) 211 if (!curr_gateway_tmp)
193 return; 212 goto out_rcu;
194 213
195 if (!curr_gateway_tmp->orig_node) 214 if (!curr_gateway_tmp->orig_node)
196 goto deselect; 215 goto deselect_rcu;
197 216
198 if (!curr_gateway_tmp->orig_node->router) 217 if (!curr_gateway_tmp->orig_node->router)
199 goto deselect; 218 goto deselect_rcu;
200 219
201 /* this node already is the gateway */ 220 /* this node already is the gateway */
202 if (curr_gateway_tmp->orig_node == orig_node) 221 if (curr_gateway_tmp->orig_node == orig_node)
203 return; 222 goto out_rcu;
204 223
205 if (!orig_node->router) 224 if (!orig_node->router)
206 return; 225 goto out_rcu;
207 226
208 gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg; 227 gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
228 rcu_read_unlock();
229
209 orig_tq_avg = orig_node->router->tq_avg; 230 orig_tq_avg = orig_node->router->tq_avg;
210 231
211 /* the TQ value has to be better */ 232 /* the TQ value has to be better */
212 if (orig_tq_avg < gw_tq_avg) 233 if (orig_tq_avg < gw_tq_avg)
213 return; 234 goto out;
214 235
215 /** 236 /**
216 * if the routing class is greater than 3 the value tells us how much 237 * if the routing class is greater than 3 the value tells us how much
@@ -218,15 +239,23 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
218 **/ 239 **/
219 if ((atomic_read(&bat_priv->gw_sel_class) > 3) && 240 if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
220 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) 241 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
221 return; 242 goto out;
222 243
223 bat_dbg(DBG_BATMAN, bat_priv, 244 bat_dbg(DBG_BATMAN, bat_priv,
224 "Restarting gateway selection: better gateway found (tq curr: " 245 "Restarting gateway selection: better gateway found (tq curr: "
225 "%i, tq new: %i)\n", 246 "%i, tq new: %i)\n",
226 gw_tq_avg, orig_tq_avg); 247 gw_tq_avg, orig_tq_avg);
248 goto deselect;
227 249
250out_rcu:
251 rcu_read_unlock();
252 goto out;
253deselect_rcu:
254 rcu_read_unlock();
228deselect: 255deselect:
229 gw_deselect(bat_priv); 256 gw_deselect(bat_priv);
257out:
258 return;
230} 259}
231 260
232static void gw_node_add(struct bat_priv *bat_priv, 261static void gw_node_add(struct bat_priv *bat_priv,
@@ -242,7 +271,7 @@ static void gw_node_add(struct bat_priv *bat_priv,
242 memset(gw_node, 0, sizeof(struct gw_node)); 271 memset(gw_node, 0, sizeof(struct gw_node));
243 INIT_HLIST_NODE(&gw_node->list); 272 INIT_HLIST_NODE(&gw_node->list);
244 gw_node->orig_node = orig_node; 273 gw_node->orig_node = orig_node;
245 kref_init(&gw_node->refcount); 274 atomic_set(&gw_node->refcount, 1);
246 275
247 spin_lock_bh(&bat_priv->gw_list_lock); 276 spin_lock_bh(&bat_priv->gw_list_lock);
248 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); 277 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
@@ -283,7 +312,7 @@ void gw_node_update(struct bat_priv *bat_priv,
283 "Gateway %pM removed from gateway list\n", 312 "Gateway %pM removed from gateway list\n",
284 orig_node->orig); 313 orig_node->orig);
285 314
286 if (gw_node == bat_priv->curr_gw) { 315 if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
287 rcu_read_unlock(); 316 rcu_read_unlock();
288 gw_deselect(bat_priv); 317 gw_deselect(bat_priv);
289 return; 318 return;
@@ -321,11 +350,11 @@ void gw_node_purge(struct bat_priv *bat_priv)
321 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) 350 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
322 continue; 351 continue;
323 352
324 if (bat_priv->curr_gw == gw_node) 353 if (rcu_dereference(bat_priv->curr_gw) == gw_node)
325 gw_deselect(bat_priv); 354 gw_deselect(bat_priv);
326 355
327 hlist_del_rcu(&gw_node->list); 356 hlist_del_rcu(&gw_node->list);
328 call_rcu(&gw_node->rcu, gw_node_free_rcu); 357 gw_node_free_ref(gw_node);
329 } 358 }
330 359
331 360
@@ -335,12 +364,16 @@ void gw_node_purge(struct bat_priv *bat_priv)
335static int _write_buffer_text(struct bat_priv *bat_priv, 364static int _write_buffer_text(struct bat_priv *bat_priv,
336 struct seq_file *seq, struct gw_node *gw_node) 365 struct seq_file *seq, struct gw_node *gw_node)
337{ 366{
338 int down, up; 367 struct gw_node *curr_gw;
368 int down, up, ret;
339 369
340 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); 370 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
341 371
342 return seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n", 372 rcu_read_lock();
343 (bat_priv->curr_gw == gw_node ? "=>" : " "), 373 curr_gw = rcu_dereference(bat_priv->curr_gw);
374
375 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
376 (curr_gw == gw_node ? "=>" : " "),
344 gw_node->orig_node->orig, 377 gw_node->orig_node->orig,
345 gw_node->orig_node->router->tq_avg, 378 gw_node->orig_node->router->tq_avg,
346 gw_node->orig_node->router->addr, 379 gw_node->orig_node->router->addr,
@@ -350,6 +383,9 @@ static int _write_buffer_text(struct bat_priv *bat_priv,
350 (down > 2048 ? "MBit" : "KBit"), 383 (down > 2048 ? "MBit" : "KBit"),
351 (up > 2048 ? up / 1024 : up), 384 (up > 2048 ? up / 1024 : up),
352 (up > 2048 ? "MBit" : "KBit")); 385 (up > 2048 ? "MBit" : "KBit"));
386
387 rcu_read_unlock();
388 return ret;
353} 389}
354 390
355int gw_client_seq_print_text(struct seq_file *seq, void *offset) 391int gw_client_seq_print_text(struct seq_file *seq, void *offset)
@@ -470,8 +506,12 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
470 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER) 506 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
471 return -1; 507 return -1;
472 508
473 if (!bat_priv->curr_gw) 509 rcu_read_lock();
510 if (!rcu_dereference(bat_priv->curr_gw)) {
511 rcu_read_unlock();
474 return 0; 512 return 0;
513 }
514 rcu_read_unlock();
475 515
476 return 1; 516 return 1;
477} 517}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f2131f45aa9b..b3058e46ee6b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -31,8 +31,8 @@
31 31
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33 33
34/* protect update critical side of if_list - but not the content */ 34/* protect update critical side of hardif_list - but not the content */
35static DEFINE_SPINLOCK(if_list_lock); 35static DEFINE_SPINLOCK(hardif_list_lock);
36 36
37 37
38static int batman_skb_recv(struct sk_buff *skb, 38static int batman_skb_recv(struct sk_buff *skb,
@@ -40,33 +40,31 @@ static int batman_skb_recv(struct sk_buff *skb,
40 struct packet_type *ptype, 40 struct packet_type *ptype,
41 struct net_device *orig_dev); 41 struct net_device *orig_dev);
42 42
43static void hardif_free_rcu(struct rcu_head *rcu) 43void hardif_free_rcu(struct rcu_head *rcu)
44{ 44{
45 struct batman_if *batman_if; 45 struct hard_iface *hard_iface;
46 46
47 batman_if = container_of(rcu, struct batman_if, rcu); 47 hard_iface = container_of(rcu, struct hard_iface, rcu);
48 dev_put(batman_if->net_dev); 48 dev_put(hard_iface->net_dev);
49 kref_put(&batman_if->refcount, hardif_free_ref); 49 kfree(hard_iface);
50} 50}
51 51
52struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev) 52struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
53{ 53{
54 struct batman_if *batman_if; 54 struct hard_iface *hard_iface;
55 55
56 rcu_read_lock(); 56 rcu_read_lock();
57 list_for_each_entry_rcu(batman_if, &if_list, list) { 57 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
58 if (batman_if->net_dev == net_dev) 58 if (hard_iface->net_dev == net_dev &&
59 atomic_inc_not_zero(&hard_iface->refcount))
59 goto out; 60 goto out;
60 } 61 }
61 62
62 batman_if = NULL; 63 hard_iface = NULL;
63 64
64out: 65out:
65 if (batman_if)
66 kref_get(&batman_if->refcount);
67
68 rcu_read_unlock(); 66 rcu_read_unlock();
69 return batman_if; 67 return hard_iface;
70} 68}
71 69
72static int is_valid_iface(struct net_device *net_dev) 70static int is_valid_iface(struct net_device *net_dev)
@@ -81,13 +79,8 @@ static int is_valid_iface(struct net_device *net_dev)
81 return 0; 79 return 0;
82 80
83 /* no batman over batman */ 81 /* no batman over batman */
84#ifdef HAVE_NET_DEVICE_OPS 82 if (softif_is_valid(net_dev))
85 if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
86 return 0;
87#else
88 if (net_dev->hard_start_xmit == interface_tx)
89 return 0; 83 return 0;
90#endif
91 84
92 /* Device is being bridged */ 85 /* Device is being bridged */
93 /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) 86 /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
@@ -96,27 +89,25 @@ static int is_valid_iface(struct net_device *net_dev)
96 return 1; 89 return 1;
97} 90}
98 91
99static struct batman_if *get_active_batman_if(struct net_device *soft_iface) 92static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
100{ 93{
101 struct batman_if *batman_if; 94 struct hard_iface *hard_iface;
102 95
103 rcu_read_lock(); 96 rcu_read_lock();
104 list_for_each_entry_rcu(batman_if, &if_list, list) { 97 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
105 if (batman_if->soft_iface != soft_iface) 98 if (hard_iface->soft_iface != soft_iface)
106 continue; 99 continue;
107 100
108 if (batman_if->if_status == IF_ACTIVE) 101 if (hard_iface->if_status == IF_ACTIVE &&
102 atomic_inc_not_zero(&hard_iface->refcount))
109 goto out; 103 goto out;
110 } 104 }
111 105
112 batman_if = NULL; 106 hard_iface = NULL;
113 107
114out: 108out:
115 if (batman_if)
116 kref_get(&batman_if->refcount);
117
118 rcu_read_unlock(); 109 rcu_read_unlock();
119 return batman_if; 110 return hard_iface;
120} 111}
121 112
122static void update_primary_addr(struct bat_priv *bat_priv) 113static void update_primary_addr(struct bat_priv *bat_priv)
@@ -132,24 +123,24 @@ static void update_primary_addr(struct bat_priv *bat_priv)
132} 123}
133 124
134static void set_primary_if(struct bat_priv *bat_priv, 125static void set_primary_if(struct bat_priv *bat_priv,
135 struct batman_if *batman_if) 126 struct hard_iface *hard_iface)
136{ 127{
137 struct batman_packet *batman_packet; 128 struct batman_packet *batman_packet;
138 struct batman_if *old_if; 129 struct hard_iface *old_if;
139 130
140 if (batman_if) 131 if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
141 kref_get(&batman_if->refcount); 132 hard_iface = NULL;
142 133
143 old_if = bat_priv->primary_if; 134 old_if = bat_priv->primary_if;
144 bat_priv->primary_if = batman_if; 135 bat_priv->primary_if = hard_iface;
145 136
146 if (old_if) 137 if (old_if)
147 kref_put(&old_if->refcount, hardif_free_ref); 138 hardif_free_ref(old_if);
148 139
149 if (!bat_priv->primary_if) 140 if (!bat_priv->primary_if)
150 return; 141 return;
151 142
152 batman_packet = (struct batman_packet *)(batman_if->packet_buff); 143 batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
153 batman_packet->flags = PRIMARIES_FIRST_HOP; 144 batman_packet->flags = PRIMARIES_FIRST_HOP;
154 batman_packet->ttl = TTL; 145 batman_packet->ttl = TTL;
155 146
@@ -162,42 +153,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
162 atomic_set(&bat_priv->hna_local_changed, 1); 153 atomic_set(&bat_priv->hna_local_changed, 1);
163} 154}
164 155
165static bool hardif_is_iface_up(struct batman_if *batman_if) 156static bool hardif_is_iface_up(struct hard_iface *hard_iface)
166{ 157{
167 if (batman_if->net_dev->flags & IFF_UP) 158 if (hard_iface->net_dev->flags & IFF_UP)
168 return true; 159 return true;
169 160
170 return false; 161 return false;
171} 162}
172 163
173static void update_mac_addresses(struct batman_if *batman_if) 164static void update_mac_addresses(struct hard_iface *hard_iface)
174{ 165{
175 memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, 166 memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
176 batman_if->net_dev->dev_addr, ETH_ALEN); 167 hard_iface->net_dev->dev_addr, ETH_ALEN);
177 memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender, 168 memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
178 batman_if->net_dev->dev_addr, ETH_ALEN); 169 hard_iface->net_dev->dev_addr, ETH_ALEN);
179} 170}
180 171
181static void check_known_mac_addr(struct net_device *net_dev) 172static void check_known_mac_addr(struct net_device *net_dev)
182{ 173{
183 struct batman_if *batman_if; 174 struct hard_iface *hard_iface;
184 175
185 rcu_read_lock(); 176 rcu_read_lock();
186 list_for_each_entry_rcu(batman_if, &if_list, list) { 177 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
187 if ((batman_if->if_status != IF_ACTIVE) && 178 if ((hard_iface->if_status != IF_ACTIVE) &&
188 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 179 (hard_iface->if_status != IF_TO_BE_ACTIVATED))
189 continue; 180 continue;
190 181
191 if (batman_if->net_dev == net_dev) 182 if (hard_iface->net_dev == net_dev)
192 continue; 183 continue;
193 184
194 if (!compare_orig(batman_if->net_dev->dev_addr, 185 if (!compare_eth(hard_iface->net_dev->dev_addr,
195 net_dev->dev_addr)) 186 net_dev->dev_addr))
196 continue; 187 continue;
197 188
198 pr_warning("The newly added mac address (%pM) already exists " 189 pr_warning("The newly added mac address (%pM) already exists "
199 "on: %s\n", net_dev->dev_addr, 190 "on: %s\n", net_dev->dev_addr,
200 batman_if->net_dev->name); 191 hard_iface->net_dev->name);
201 pr_warning("It is strongly recommended to keep mac addresses " 192 pr_warning("It is strongly recommended to keep mac addresses "
202 "unique to avoid problems!\n"); 193 "unique to avoid problems!\n");
203 } 194 }
@@ -207,7 +198,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
207int hardif_min_mtu(struct net_device *soft_iface) 198int hardif_min_mtu(struct net_device *soft_iface)
208{ 199{
209 struct bat_priv *bat_priv = netdev_priv(soft_iface); 200 struct bat_priv *bat_priv = netdev_priv(soft_iface);
210 struct batman_if *batman_if; 201 struct hard_iface *hard_iface;
211 /* allow big frames if all devices are capable to do so 202 /* allow big frames if all devices are capable to do so
212 * (have MTU > 1500 + BAT_HEADER_LEN) */ 203 * (have MTU > 1500 + BAT_HEADER_LEN) */
213 int min_mtu = ETH_DATA_LEN; 204 int min_mtu = ETH_DATA_LEN;
@@ -216,15 +207,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
216 goto out; 207 goto out;
217 208
218 rcu_read_lock(); 209 rcu_read_lock();
219 list_for_each_entry_rcu(batman_if, &if_list, list) { 210 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
220 if ((batman_if->if_status != IF_ACTIVE) && 211 if ((hard_iface->if_status != IF_ACTIVE) &&
221 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 212 (hard_iface->if_status != IF_TO_BE_ACTIVATED))
222 continue; 213 continue;
223 214
224 if (batman_if->soft_iface != soft_iface) 215 if (hard_iface->soft_iface != soft_iface)
225 continue; 216 continue;
226 217
227 min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN, 218 min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
228 min_mtu); 219 min_mtu);
229 } 220 }
230 rcu_read_unlock(); 221 rcu_read_unlock();
@@ -242,77 +233,95 @@ void update_min_mtu(struct net_device *soft_iface)
242 soft_iface->mtu = min_mtu; 233 soft_iface->mtu = min_mtu;
243} 234}
244 235
245static void hardif_activate_interface(struct batman_if *batman_if) 236static void hardif_activate_interface(struct hard_iface *hard_iface)
246{ 237{
247 struct bat_priv *bat_priv; 238 struct bat_priv *bat_priv;
248 239
249 if (batman_if->if_status != IF_INACTIVE) 240 if (hard_iface->if_status != IF_INACTIVE)
250 return; 241 return;
251 242
252 bat_priv = netdev_priv(batman_if->soft_iface); 243 bat_priv = netdev_priv(hard_iface->soft_iface);
253 244
254 update_mac_addresses(batman_if); 245 update_mac_addresses(hard_iface);
255 batman_if->if_status = IF_TO_BE_ACTIVATED; 246 hard_iface->if_status = IF_TO_BE_ACTIVATED;
256 247
257 /** 248 /**
258 * the first active interface becomes our primary interface or 249 * the first active interface becomes our primary interface or
259 * the next active interface after the old primay interface was removed 250 * the next active interface after the old primay interface was removed
260 */ 251 */
261 if (!bat_priv->primary_if) 252 if (!bat_priv->primary_if)
262 set_primary_if(bat_priv, batman_if); 253 set_primary_if(bat_priv, hard_iface);
263 254
264 bat_info(batman_if->soft_iface, "Interface activated: %s\n", 255 bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
265 batman_if->net_dev->name); 256 hard_iface->net_dev->name);
266 257
267 update_min_mtu(batman_if->soft_iface); 258 update_min_mtu(hard_iface->soft_iface);
268 return; 259 return;
269} 260}
270 261
271static void hardif_deactivate_interface(struct batman_if *batman_if) 262static void hardif_deactivate_interface(struct hard_iface *hard_iface)
272{ 263{
273 if ((batman_if->if_status != IF_ACTIVE) && 264 if ((hard_iface->if_status != IF_ACTIVE) &&
274 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 265 (hard_iface->if_status != IF_TO_BE_ACTIVATED))
275 return; 266 return;
276 267
277 batman_if->if_status = IF_INACTIVE; 268 hard_iface->if_status = IF_INACTIVE;
278 269
279 bat_info(batman_if->soft_iface, "Interface deactivated: %s\n", 270 bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
280 batman_if->net_dev->name); 271 hard_iface->net_dev->name);
281 272
282 update_min_mtu(batman_if->soft_iface); 273 update_min_mtu(hard_iface->soft_iface);
283} 274}
284 275
285int hardif_enable_interface(struct batman_if *batman_if, char *iface_name) 276int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
286{ 277{
287 struct bat_priv *bat_priv; 278 struct bat_priv *bat_priv;
288 struct batman_packet *batman_packet; 279 struct batman_packet *batman_packet;
280 struct net_device *soft_iface;
281 int ret;
282
283 if (hard_iface->if_status != IF_NOT_IN_USE)
284 goto out;
289 285
290 if (batman_if->if_status != IF_NOT_IN_USE) 286 if (!atomic_inc_not_zero(&hard_iface->refcount))
291 goto out; 287 goto out;
292 288
293 batman_if->soft_iface = dev_get_by_name(&init_net, iface_name); 289 soft_iface = dev_get_by_name(&init_net, iface_name);
294 290
295 if (!batman_if->soft_iface) { 291 if (!soft_iface) {
296 batman_if->soft_iface = softif_create(iface_name); 292 soft_iface = softif_create(iface_name);
297 293
298 if (!batman_if->soft_iface) 294 if (!soft_iface) {
295 ret = -ENOMEM;
299 goto err; 296 goto err;
297 }
300 298
301 /* dev_get_by_name() increases the reference counter for us */ 299 /* dev_get_by_name() increases the reference counter for us */
302 dev_hold(batman_if->soft_iface); 300 dev_hold(soft_iface);
303 } 301 }
304 302
305 bat_priv = netdev_priv(batman_if->soft_iface); 303 if (!softif_is_valid(soft_iface)) {
306 batman_if->packet_len = BAT_PACKET_LEN; 304 pr_err("Can't create batman mesh interface %s: "
307 batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC); 305 "already exists as regular interface\n",
306 soft_iface->name);
307 dev_put(soft_iface);
308 ret = -EINVAL;
309 goto err;
310 }
311
312 hard_iface->soft_iface = soft_iface;
313 bat_priv = netdev_priv(hard_iface->soft_iface);
314 hard_iface->packet_len = BAT_PACKET_LEN;
315 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
308 316
309 if (!batman_if->packet_buff) { 317 if (!hard_iface->packet_buff) {
310 bat_err(batman_if->soft_iface, "Can't add interface packet " 318 bat_err(hard_iface->soft_iface, "Can't add interface packet "
311 "(%s): out of memory\n", batman_if->net_dev->name); 319 "(%s): out of memory\n", hard_iface->net_dev->name);
320 ret = -ENOMEM;
312 goto err; 321 goto err;
313 } 322 }
314 323
315 batman_packet = (struct batman_packet *)(batman_if->packet_buff); 324 batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
316 batman_packet->packet_type = BAT_PACKET; 325 batman_packet->packet_type = BAT_PACKET;
317 batman_packet->version = COMPAT_VERSION; 326 batman_packet->version = COMPAT_VERSION;
318 batman_packet->flags = 0; 327 batman_packet->flags = 0;
@@ -320,107 +329,107 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
320 batman_packet->tq = TQ_MAX_VALUE; 329 batman_packet->tq = TQ_MAX_VALUE;
321 batman_packet->num_hna = 0; 330 batman_packet->num_hna = 0;
322 331
323 batman_if->if_num = bat_priv->num_ifaces; 332 hard_iface->if_num = bat_priv->num_ifaces;
324 bat_priv->num_ifaces++; 333 bat_priv->num_ifaces++;
325 batman_if->if_status = IF_INACTIVE; 334 hard_iface->if_status = IF_INACTIVE;
326 orig_hash_add_if(batman_if, bat_priv->num_ifaces); 335 orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
327 336
328 batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); 337 hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
329 batman_if->batman_adv_ptype.func = batman_skb_recv; 338 hard_iface->batman_adv_ptype.func = batman_skb_recv;
330 batman_if->batman_adv_ptype.dev = batman_if->net_dev; 339 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
331 kref_get(&batman_if->refcount); 340 dev_add_pack(&hard_iface->batman_adv_ptype);
332 dev_add_pack(&batman_if->batman_adv_ptype);
333 341
334 atomic_set(&batman_if->seqno, 1); 342 atomic_set(&hard_iface->seqno, 1);
335 atomic_set(&batman_if->frag_seqno, 1); 343 atomic_set(&hard_iface->frag_seqno, 1);
336 bat_info(batman_if->soft_iface, "Adding interface: %s\n", 344 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
337 batman_if->net_dev->name); 345 hard_iface->net_dev->name);
338 346
339 if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu < 347 if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
340 ETH_DATA_LEN + BAT_HEADER_LEN) 348 ETH_DATA_LEN + BAT_HEADER_LEN)
341 bat_info(batman_if->soft_iface, 349 bat_info(hard_iface->soft_iface,
342 "The MTU of interface %s is too small (%i) to handle " 350 "The MTU of interface %s is too small (%i) to handle "
343 "the transport of batman-adv packets. Packets going " 351 "the transport of batman-adv packets. Packets going "
344 "over this interface will be fragmented on layer2 " 352 "over this interface will be fragmented on layer2 "
345 "which could impact the performance. Setting the MTU " 353 "which could impact the performance. Setting the MTU "
346 "to %zi would solve the problem.\n", 354 "to %zi would solve the problem.\n",
347 batman_if->net_dev->name, batman_if->net_dev->mtu, 355 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
348 ETH_DATA_LEN + BAT_HEADER_LEN); 356 ETH_DATA_LEN + BAT_HEADER_LEN);
349 357
350 if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu < 358 if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
351 ETH_DATA_LEN + BAT_HEADER_LEN) 359 ETH_DATA_LEN + BAT_HEADER_LEN)
352 bat_info(batman_if->soft_iface, 360 bat_info(hard_iface->soft_iface,
353 "The MTU of interface %s is too small (%i) to handle " 361 "The MTU of interface %s is too small (%i) to handle "
354 "the transport of batman-adv packets. If you experience" 362 "the transport of batman-adv packets. If you experience"
355 " problems getting traffic through try increasing the " 363 " problems getting traffic through try increasing the "
356 "MTU to %zi.\n", 364 "MTU to %zi.\n",
357 batman_if->net_dev->name, batman_if->net_dev->mtu, 365 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
358 ETH_DATA_LEN + BAT_HEADER_LEN); 366 ETH_DATA_LEN + BAT_HEADER_LEN);
359 367
360 if (hardif_is_iface_up(batman_if)) 368 if (hardif_is_iface_up(hard_iface))
361 hardif_activate_interface(batman_if); 369 hardif_activate_interface(hard_iface);
362 else 370 else
363 bat_err(batman_if->soft_iface, "Not using interface %s " 371 bat_err(hard_iface->soft_iface, "Not using interface %s "
364 "(retrying later): interface not active\n", 372 "(retrying later): interface not active\n",
365 batman_if->net_dev->name); 373 hard_iface->net_dev->name);
366 374
367 /* begin scheduling originator messages on that interface */ 375 /* begin scheduling originator messages on that interface */
368 schedule_own_packet(batman_if); 376 schedule_own_packet(hard_iface);
369 377
370out: 378out:
371 return 0; 379 return 0;
372 380
373err: 381err:
374 return -ENOMEM; 382 hardif_free_ref(hard_iface);
383 return ret;
375} 384}
376 385
377void hardif_disable_interface(struct batman_if *batman_if) 386void hardif_disable_interface(struct hard_iface *hard_iface)
378{ 387{
379 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); 388 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
380 389
381 if (batman_if->if_status == IF_ACTIVE) 390 if (hard_iface->if_status == IF_ACTIVE)
382 hardif_deactivate_interface(batman_if); 391 hardif_deactivate_interface(hard_iface);
383 392
384 if (batman_if->if_status != IF_INACTIVE) 393 if (hard_iface->if_status != IF_INACTIVE)
385 return; 394 return;
386 395
387 bat_info(batman_if->soft_iface, "Removing interface: %s\n", 396 bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
388 batman_if->net_dev->name); 397 hard_iface->net_dev->name);
389 dev_remove_pack(&batman_if->batman_adv_ptype); 398 dev_remove_pack(&hard_iface->batman_adv_ptype);
390 kref_put(&batman_if->refcount, hardif_free_ref);
391 399
392 bat_priv->num_ifaces--; 400 bat_priv->num_ifaces--;
393 orig_hash_del_if(batman_if, bat_priv->num_ifaces); 401 orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
394 402
395 if (batman_if == bat_priv->primary_if) { 403 if (hard_iface == bat_priv->primary_if) {
396 struct batman_if *new_if; 404 struct hard_iface *new_if;
397 405
398 new_if = get_active_batman_if(batman_if->soft_iface); 406 new_if = hardif_get_active(hard_iface->soft_iface);
399 set_primary_if(bat_priv, new_if); 407 set_primary_if(bat_priv, new_if);
400 408
401 if (new_if) 409 if (new_if)
402 kref_put(&new_if->refcount, hardif_free_ref); 410 hardif_free_ref(new_if);
403 } 411 }
404 412
405 kfree(batman_if->packet_buff); 413 kfree(hard_iface->packet_buff);
406 batman_if->packet_buff = NULL; 414 hard_iface->packet_buff = NULL;
407 batman_if->if_status = IF_NOT_IN_USE; 415 hard_iface->if_status = IF_NOT_IN_USE;
408 416
409 /* delete all references to this batman_if */ 417 /* delete all references to this hard_iface */
410 purge_orig_ref(bat_priv); 418 purge_orig_ref(bat_priv);
411 purge_outstanding_packets(bat_priv, batman_if); 419 purge_outstanding_packets(bat_priv, hard_iface);
412 dev_put(batman_if->soft_iface); 420 dev_put(hard_iface->soft_iface);
413 421
414 /* nobody uses this interface anymore */ 422 /* nobody uses this interface anymore */
415 if (!bat_priv->num_ifaces) 423 if (!bat_priv->num_ifaces)
416 softif_destroy(batman_if->soft_iface); 424 softif_destroy(hard_iface->soft_iface);
417 425
418 batman_if->soft_iface = NULL; 426 hard_iface->soft_iface = NULL;
427 hardif_free_ref(hard_iface);
419} 428}
420 429
421static struct batman_if *hardif_add_interface(struct net_device *net_dev) 430static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
422{ 431{
423 struct batman_if *batman_if; 432 struct hard_iface *hard_iface;
424 int ret; 433 int ret;
425 434
426 ret = is_valid_iface(net_dev); 435 ret = is_valid_iface(net_dev);
@@ -429,73 +438,73 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
429 438
430 dev_hold(net_dev); 439 dev_hold(net_dev);
431 440
432 batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); 441 hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
433 if (!batman_if) { 442 if (!hard_iface) {
434 pr_err("Can't add interface (%s): out of memory\n", 443 pr_err("Can't add interface (%s): out of memory\n",
435 net_dev->name); 444 net_dev->name);
436 goto release_dev; 445 goto release_dev;
437 } 446 }
438 447
439 ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev); 448 ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
440 if (ret) 449 if (ret)
441 goto free_if; 450 goto free_if;
442 451
443 batman_if->if_num = -1; 452 hard_iface->if_num = -1;
444 batman_if->net_dev = net_dev; 453 hard_iface->net_dev = net_dev;
445 batman_if->soft_iface = NULL; 454 hard_iface->soft_iface = NULL;
446 batman_if->if_status = IF_NOT_IN_USE; 455 hard_iface->if_status = IF_NOT_IN_USE;
447 INIT_LIST_HEAD(&batman_if->list); 456 INIT_LIST_HEAD(&hard_iface->list);
448 kref_init(&batman_if->refcount); 457 /* extra reference for return */
458 atomic_set(&hard_iface->refcount, 2);
449 459
450 check_known_mac_addr(batman_if->net_dev); 460 check_known_mac_addr(hard_iface->net_dev);
451 461
452 spin_lock(&if_list_lock); 462 spin_lock(&hardif_list_lock);
453 list_add_tail_rcu(&batman_if->list, &if_list); 463 list_add_tail_rcu(&hard_iface->list, &hardif_list);
454 spin_unlock(&if_list_lock); 464 spin_unlock(&hardif_list_lock);
455 465
456 /* extra reference for return */ 466 return hard_iface;
457 kref_get(&batman_if->refcount);
458 return batman_if;
459 467
460free_if: 468free_if:
461 kfree(batman_if); 469 kfree(hard_iface);
462release_dev: 470release_dev:
463 dev_put(net_dev); 471 dev_put(net_dev);
464out: 472out:
465 return NULL; 473 return NULL;
466} 474}
467 475
468static void hardif_remove_interface(struct batman_if *batman_if) 476static void hardif_remove_interface(struct hard_iface *hard_iface)
469{ 477{
470 /* first deactivate interface */ 478 /* first deactivate interface */
471 if (batman_if->if_status != IF_NOT_IN_USE) 479 if (hard_iface->if_status != IF_NOT_IN_USE)
472 hardif_disable_interface(batman_if); 480 hardif_disable_interface(hard_iface);
473 481
474 if (batman_if->if_status != IF_NOT_IN_USE) 482 if (hard_iface->if_status != IF_NOT_IN_USE)
475 return; 483 return;
476 484
477 batman_if->if_status = IF_TO_BE_REMOVED; 485 hard_iface->if_status = IF_TO_BE_REMOVED;
478 sysfs_del_hardif(&batman_if->hardif_obj); 486 sysfs_del_hardif(&hard_iface->hardif_obj);
479 call_rcu(&batman_if->rcu, hardif_free_rcu); 487 hardif_free_ref(hard_iface);
480} 488}
481 489
482void hardif_remove_interfaces(void) 490void hardif_remove_interfaces(void)
483{ 491{
484 struct batman_if *batman_if, *batman_if_tmp; 492 struct hard_iface *hard_iface, *hard_iface_tmp;
485 struct list_head if_queue; 493 struct list_head if_queue;
486 494
487 INIT_LIST_HEAD(&if_queue); 495 INIT_LIST_HEAD(&if_queue);
488 496
489 spin_lock(&if_list_lock); 497 spin_lock(&hardif_list_lock);
490 list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) { 498 list_for_each_entry_safe(hard_iface, hard_iface_tmp,
491 list_del_rcu(&batman_if->list); 499 &hardif_list, list) {
492 list_add_tail(&batman_if->list, &if_queue); 500 list_del_rcu(&hard_iface->list);
501 list_add_tail(&hard_iface->list, &if_queue);
493 } 502 }
494 spin_unlock(&if_list_lock); 503 spin_unlock(&hardif_list_lock);
495 504
496 rtnl_lock(); 505 rtnl_lock();
497 list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) { 506 list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
498 hardif_remove_interface(batman_if); 507 hardif_remove_interface(hard_iface);
499 } 508 }
500 rtnl_unlock(); 509 rtnl_unlock();
501} 510}
@@ -504,43 +513,43 @@ static int hard_if_event(struct notifier_block *this,
504 unsigned long event, void *ptr) 513 unsigned long event, void *ptr)
505{ 514{
506 struct net_device *net_dev = (struct net_device *)ptr; 515 struct net_device *net_dev = (struct net_device *)ptr;
507 struct batman_if *batman_if = get_batman_if_by_netdev(net_dev); 516 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
508 struct bat_priv *bat_priv; 517 struct bat_priv *bat_priv;
509 518
510 if (!batman_if && event == NETDEV_REGISTER) 519 if (!hard_iface && event == NETDEV_REGISTER)
511 batman_if = hardif_add_interface(net_dev); 520 hard_iface = hardif_add_interface(net_dev);
512 521
513 if (!batman_if) 522 if (!hard_iface)
514 goto out; 523 goto out;
515 524
516 switch (event) { 525 switch (event) {
517 case NETDEV_UP: 526 case NETDEV_UP:
518 hardif_activate_interface(batman_if); 527 hardif_activate_interface(hard_iface);
519 break; 528 break;
520 case NETDEV_GOING_DOWN: 529 case NETDEV_GOING_DOWN:
521 case NETDEV_DOWN: 530 case NETDEV_DOWN:
522 hardif_deactivate_interface(batman_if); 531 hardif_deactivate_interface(hard_iface);
523 break; 532 break;
524 case NETDEV_UNREGISTER: 533 case NETDEV_UNREGISTER:
525 spin_lock(&if_list_lock); 534 spin_lock(&hardif_list_lock);
526 list_del_rcu(&batman_if->list); 535 list_del_rcu(&hard_iface->list);
527 spin_unlock(&if_list_lock); 536 spin_unlock(&hardif_list_lock);
528 537
529 hardif_remove_interface(batman_if); 538 hardif_remove_interface(hard_iface);
530 break; 539 break;
531 case NETDEV_CHANGEMTU: 540 case NETDEV_CHANGEMTU:
532 if (batman_if->soft_iface) 541 if (hard_iface->soft_iface)
533 update_min_mtu(batman_if->soft_iface); 542 update_min_mtu(hard_iface->soft_iface);
534 break; 543 break;
535 case NETDEV_CHANGEADDR: 544 case NETDEV_CHANGEADDR:
536 if (batman_if->if_status == IF_NOT_IN_USE) 545 if (hard_iface->if_status == IF_NOT_IN_USE)
537 goto hardif_put; 546 goto hardif_put;
538 547
539 check_known_mac_addr(batman_if->net_dev); 548 check_known_mac_addr(hard_iface->net_dev);
540 update_mac_addresses(batman_if); 549 update_mac_addresses(hard_iface);
541 550
542 bat_priv = netdev_priv(batman_if->soft_iface); 551 bat_priv = netdev_priv(hard_iface->soft_iface);
543 if (batman_if == bat_priv->primary_if) 552 if (hard_iface == bat_priv->primary_if)
544 update_primary_addr(bat_priv); 553 update_primary_addr(bat_priv);
545 break; 554 break;
546 default: 555 default:
@@ -548,7 +557,7 @@ static int hard_if_event(struct notifier_block *this,
548 }; 557 };
549 558
550hardif_put: 559hardif_put:
551 kref_put(&batman_if->refcount, hardif_free_ref); 560 hardif_free_ref(hard_iface);
552out: 561out:
553 return NOTIFY_DONE; 562 return NOTIFY_DONE;
554} 563}
@@ -561,10 +570,10 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
561{ 570{
562 struct bat_priv *bat_priv; 571 struct bat_priv *bat_priv;
563 struct batman_packet *batman_packet; 572 struct batman_packet *batman_packet;
564 struct batman_if *batman_if; 573 struct hard_iface *hard_iface;
565 int ret; 574 int ret;
566 575
567 batman_if = container_of(ptype, struct batman_if, batman_adv_ptype); 576 hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
568 skb = skb_share_check(skb, GFP_ATOMIC); 577 skb = skb_share_check(skb, GFP_ATOMIC);
569 578
570 /* skb was released by skb_share_check() */ 579 /* skb was released by skb_share_check() */
@@ -580,16 +589,16 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
580 || !skb_mac_header(skb))) 589 || !skb_mac_header(skb)))
581 goto err_free; 590 goto err_free;
582 591
583 if (!batman_if->soft_iface) 592 if (!hard_iface->soft_iface)
584 goto err_free; 593 goto err_free;
585 594
586 bat_priv = netdev_priv(batman_if->soft_iface); 595 bat_priv = netdev_priv(hard_iface->soft_iface);
587 596
588 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 597 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
589 goto err_free; 598 goto err_free;
590 599
591 /* discard frames on not active interfaces */ 600 /* discard frames on not active interfaces */
592 if (batman_if->if_status != IF_ACTIVE) 601 if (hard_iface->if_status != IF_ACTIVE)
593 goto err_free; 602 goto err_free;
594 603
595 batman_packet = (struct batman_packet *)skb->data; 604 batman_packet = (struct batman_packet *)skb->data;
@@ -607,32 +616,32 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
607 switch (batman_packet->packet_type) { 616 switch (batman_packet->packet_type) {
608 /* batman originator packet */ 617 /* batman originator packet */
609 case BAT_PACKET: 618 case BAT_PACKET:
610 ret = recv_bat_packet(skb, batman_if); 619 ret = recv_bat_packet(skb, hard_iface);
611 break; 620 break;
612 621
613 /* batman icmp packet */ 622 /* batman icmp packet */
614 case BAT_ICMP: 623 case BAT_ICMP:
615 ret = recv_icmp_packet(skb, batman_if); 624 ret = recv_icmp_packet(skb, hard_iface);
616 break; 625 break;
617 626
618 /* unicast packet */ 627 /* unicast packet */
619 case BAT_UNICAST: 628 case BAT_UNICAST:
620 ret = recv_unicast_packet(skb, batman_if); 629 ret = recv_unicast_packet(skb, hard_iface);
621 break; 630 break;
622 631
623 /* fragmented unicast packet */ 632 /* fragmented unicast packet */
624 case BAT_UNICAST_FRAG: 633 case BAT_UNICAST_FRAG:
625 ret = recv_ucast_frag_packet(skb, batman_if); 634 ret = recv_ucast_frag_packet(skb, hard_iface);
626 break; 635 break;
627 636
628 /* broadcast packet */ 637 /* broadcast packet */
629 case BAT_BCAST: 638 case BAT_BCAST:
630 ret = recv_bcast_packet(skb, batman_if); 639 ret = recv_bcast_packet(skb, hard_iface);
631 break; 640 break;
632 641
633 /* vis packet */ 642 /* vis packet */
634 case BAT_VIS: 643 case BAT_VIS:
635 ret = recv_vis_packet(skb, batman_if); 644 ret = recv_vis_packet(skb, hard_iface);
636 break; 645 break;
637 default: 646 default:
638 ret = NET_RX_DROP; 647 ret = NET_RX_DROP;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index ad195438428a..a9ddf36e51c8 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -31,19 +31,18 @@
31 31
32extern struct notifier_block hard_if_notifier; 32extern struct notifier_block hard_if_notifier;
33 33
34struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev); 34struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
35int hardif_enable_interface(struct batman_if *batman_if, char *iface_name); 35int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
36void hardif_disable_interface(struct batman_if *batman_if); 36void hardif_disable_interface(struct hard_iface *hard_iface);
37void hardif_remove_interfaces(void); 37void hardif_remove_interfaces(void);
38int hardif_min_mtu(struct net_device *soft_iface); 38int hardif_min_mtu(struct net_device *soft_iface);
39void update_min_mtu(struct net_device *soft_iface); 39void update_min_mtu(struct net_device *soft_iface);
40void hardif_free_rcu(struct rcu_head *rcu);
40 41
41static inline void hardif_free_ref(struct kref *refcount) 42static inline void hardif_free_ref(struct hard_iface *hard_iface)
42{ 43{
43 struct batman_if *batman_if; 44 if (atomic_dec_and_test(&hard_iface->refcount))
44 45 call_rcu(&hard_iface->rcu, hardif_free_rcu);
45 batman_if = container_of(refcount, struct batman_if, refcount);
46 kfree(batman_if);
47} 46}
48 47
49#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */ 48#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index fa2693973ab8..c5213d8f2cca 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -27,13 +27,16 @@ static void hash_init(struct hashtable_t *hash)
27{ 27{
28 int i; 28 int i;
29 29
30 for (i = 0 ; i < hash->size; i++) 30 for (i = 0 ; i < hash->size; i++) {
31 INIT_HLIST_HEAD(&hash->table[i]); 31 INIT_HLIST_HEAD(&hash->table[i]);
32 spin_lock_init(&hash->list_locks[i]);
33 }
32} 34}
33 35
34/* free only the hashtable and the hash itself. */ 36/* free only the hashtable and the hash itself. */
35void hash_destroy(struct hashtable_t *hash) 37void hash_destroy(struct hashtable_t *hash)
36{ 38{
39 kfree(hash->list_locks);
37 kfree(hash->table); 40 kfree(hash->table);
38 kfree(hash); 41 kfree(hash);
39} 42}
@@ -43,20 +46,25 @@ struct hashtable_t *hash_new(int size)
43{ 46{
44 struct hashtable_t *hash; 47 struct hashtable_t *hash;
45 48
46 hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC); 49 hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
47
48 if (!hash) 50 if (!hash)
49 return NULL; 51 return NULL;
50 52
51 hash->size = size;
52 hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC); 53 hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
54 if (!hash->table)
55 goto free_hash;
53 56
54 if (!hash->table) { 57 hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
55 kfree(hash); 58 if (!hash->list_locks)
56 return NULL; 59 goto free_table;
57 }
58 60
61 hash->size = size;
59 hash_init(hash); 62 hash_init(hash);
60
61 return hash; 63 return hash;
64
65free_table:
66 kfree(hash->table);
67free_hash:
68 kfree(hash);
69 return NULL;
62} 70}
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index eae24402fd0a..434822b27473 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -28,21 +28,17 @@
28 * compare 2 element datas for their keys, 28 * compare 2 element datas for their keys,
29 * return 0 if same and not 0 if not 29 * return 0 if same and not 0 if not
30 * same */ 30 * same */
31typedef int (*hashdata_compare_cb)(void *, void *); 31typedef int (*hashdata_compare_cb)(struct hlist_node *, void *);
32 32
33/* the hashfunction, should return an index 33/* the hashfunction, should return an index
34 * based on the key in the data of the first 34 * based on the key in the data of the first
35 * argument and the size the second */ 35 * argument and the size the second */
36typedef int (*hashdata_choose_cb)(void *, int); 36typedef int (*hashdata_choose_cb)(void *, int);
37typedef void (*hashdata_free_cb)(void *, void *); 37typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
38
39struct element_t {
40 void *data; /* pointer to the data */
41 struct hlist_node hlist; /* bucket list pointer */
42};
43 38
44struct hashtable_t { 39struct hashtable_t {
45 struct hlist_head *table; /* the hashtable itself, with the buckets */ 40 struct hlist_head *table; /* the hashtable itself with the buckets */
41 spinlock_t *list_locks; /* spinlock for each hash list entry */
46 int size; /* size of hashtable */ 42 int size; /* size of hashtable */
47}; 43};
48 44
@@ -59,21 +55,22 @@ static inline void hash_delete(struct hashtable_t *hash,
59 hashdata_free_cb free_cb, void *arg) 55 hashdata_free_cb free_cb, void *arg)
60{ 56{
61 struct hlist_head *head; 57 struct hlist_head *head;
62 struct hlist_node *walk, *safe; 58 struct hlist_node *node, *node_tmp;
63 struct element_t *bucket; 59 spinlock_t *list_lock; /* spinlock to protect write access */
64 int i; 60 int i;
65 61
66 for (i = 0; i < hash->size; i++) { 62 for (i = 0; i < hash->size; i++) {
67 head = &hash->table[i]; 63 head = &hash->table[i];
64 list_lock = &hash->list_locks[i];
68 65
69 hlist_for_each_safe(walk, safe, head) { 66 spin_lock_bh(list_lock);
70 bucket = hlist_entry(walk, struct element_t, hlist); 67 hlist_for_each_safe(node, node_tmp, head) {
71 if (free_cb) 68 hlist_del_rcu(node);
72 free_cb(bucket->data, arg);
73 69
74 hlist_del(walk); 70 if (free_cb)
75 kfree(bucket); 71 free_cb(node, arg);
76 } 72 }
73 spin_unlock_bh(list_lock);
77 } 74 }
78 75
79 hash_destroy(hash); 76 hash_destroy(hash);
@@ -82,35 +79,41 @@ static inline void hash_delete(struct hashtable_t *hash,
82/* adds data to the hashtable. returns 0 on success, -1 on error */ 79/* adds data to the hashtable. returns 0 on success, -1 on error */
83static inline int hash_add(struct hashtable_t *hash, 80static inline int hash_add(struct hashtable_t *hash,
84 hashdata_compare_cb compare, 81 hashdata_compare_cb compare,
85 hashdata_choose_cb choose, void *data) 82 hashdata_choose_cb choose,
83 void *data, struct hlist_node *data_node)
86{ 84{
87 int index; 85 int index;
88 struct hlist_head *head; 86 struct hlist_head *head;
89 struct hlist_node *walk, *safe; 87 struct hlist_node *node;
90 struct element_t *bucket; 88 spinlock_t *list_lock; /* spinlock to protect write access */
91 89
92 if (!hash) 90 if (!hash)
93 return -1; 91 goto err;
94 92
95 index = choose(data, hash->size); 93 index = choose(data, hash->size);
96 head = &hash->table[index]; 94 head = &hash->table[index];
95 list_lock = &hash->list_locks[index];
96
97 rcu_read_lock();
98 __hlist_for_each_rcu(node, head) {
99 if (!compare(node, data))
100 continue;
97 101
98 hlist_for_each_safe(walk, safe, head) { 102 goto err_unlock;
99 bucket = hlist_entry(walk, struct element_t, hlist);
100 if (compare(bucket->data, data))
101 return -1;
102 } 103 }
104 rcu_read_unlock();
103 105
104 /* no duplicate found in list, add new element */ 106 /* no duplicate found in list, add new element */
105 bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC); 107 spin_lock_bh(list_lock);
106 108 hlist_add_head_rcu(data_node, head);
107 if (!bucket) 109 spin_unlock_bh(list_lock);
108 return -1;
109
110 bucket->data = data;
111 hlist_add_head(&bucket->hlist, head);
112 110
113 return 0; 111 return 0;
112
113err_unlock:
114 rcu_read_unlock();
115err:
116 return -1;
114} 117}
115 118
116/* removes data from hash, if found. returns pointer do data on success, so you 119/* removes data from hash, if found. returns pointer do data on success, so you
@@ -122,50 +125,25 @@ static inline void *hash_remove(struct hashtable_t *hash,
122 hashdata_choose_cb choose, void *data) 125 hashdata_choose_cb choose, void *data)
123{ 126{
124 size_t index; 127 size_t index;
125 struct hlist_node *walk; 128 struct hlist_node *node;
126 struct element_t *bucket;
127 struct hlist_head *head; 129 struct hlist_head *head;
128 void *data_save; 130 void *data_save = NULL;
129 131
130 index = choose(data, hash->size); 132 index = choose(data, hash->size);
131 head = &hash->table[index]; 133 head = &hash->table[index];
132 134
133 hlist_for_each_entry(bucket, walk, head, hlist) { 135 spin_lock_bh(&hash->list_locks[index]);
134 if (compare(bucket->data, data)) { 136 hlist_for_each(node, head) {
135 data_save = bucket->data; 137 if (!compare(node, data))
136 hlist_del(walk); 138 continue;
137 kfree(bucket);
138 return data_save;
139 }
140 }
141
142 return NULL;
143}
144
145/* finds data, based on the key in keydata. returns the found data on success,
146 * or NULL on error */
147static inline void *hash_find(struct hashtable_t *hash,
148 hashdata_compare_cb compare,
149 hashdata_choose_cb choose, void *keydata)
150{
151 int index;
152 struct hlist_head *head;
153 struct hlist_node *walk;
154 struct element_t *bucket;
155
156 if (!hash)
157 return NULL;
158
159 index = choose(keydata , hash->size);
160 head = &hash->table[index];
161 139
162 hlist_for_each(walk, head) { 140 data_save = node;
163 bucket = hlist_entry(walk, struct element_t, hlist); 141 hlist_del_rcu(node);
164 if (compare(bucket->data, keydata)) 142 break;
165 return bucket->data;
166 } 143 }
144 spin_unlock_bh(&hash->list_locks[index]);
167 145
168 return NULL; 146 return data_save;
169} 147}
170 148
171#endif /* _NET_BATMAN_ADV_HASH_H_ */ 149#endif /* _NET_BATMAN_ADV_HASH_H_ */
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 319a7ccf6efa..34ce56c358e5 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -156,10 +156,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
156 struct sk_buff *skb; 156 struct sk_buff *skb;
157 struct icmp_packet_rr *icmp_packet; 157 struct icmp_packet_rr *icmp_packet;
158 158
159 struct orig_node *orig_node; 159 struct orig_node *orig_node = NULL;
160 struct batman_if *batman_if; 160 struct neigh_node *neigh_node = NULL;
161 size_t packet_len = sizeof(struct icmp_packet); 161 size_t packet_len = sizeof(struct icmp_packet);
162 uint8_t dstaddr[ETH_ALEN];
163 162
164 if (len < sizeof(struct icmp_packet)) { 163 if (len < sizeof(struct icmp_packet)) {
165 bat_dbg(DBG_BATMAN, bat_priv, 164 bat_dbg(DBG_BATMAN, bat_priv,
@@ -219,47 +218,52 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
219 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 218 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
220 goto dst_unreach; 219 goto dst_unreach;
221 220
222 spin_lock_bh(&bat_priv->orig_hash_lock); 221 rcu_read_lock();
223 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, 222 orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
224 compare_orig, choose_orig,
225 icmp_packet->dst));
226 223
227 if (!orig_node) 224 if (!orig_node)
228 goto unlock; 225 goto unlock;
229 226
230 if (!orig_node->router) 227 neigh_node = orig_node->router;
228
229 if (!neigh_node)
231 goto unlock; 230 goto unlock;
232 231
233 batman_if = orig_node->router->if_incoming; 232 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
234 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); 233 neigh_node = NULL;
234 goto unlock;
235 }
235 236
236 spin_unlock_bh(&bat_priv->orig_hash_lock); 237 rcu_read_unlock();
237 238
238 if (!batman_if) 239 if (!neigh_node->if_incoming)
239 goto dst_unreach; 240 goto dst_unreach;
240 241
241 if (batman_if->if_status != IF_ACTIVE) 242 if (neigh_node->if_incoming->if_status != IF_ACTIVE)
242 goto dst_unreach; 243 goto dst_unreach;
243 244
244 memcpy(icmp_packet->orig, 245 memcpy(icmp_packet->orig,
245 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 246 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
246 247
247 if (packet_len == sizeof(struct icmp_packet_rr)) 248 if (packet_len == sizeof(struct icmp_packet_rr))
248 memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN); 249 memcpy(icmp_packet->rr,
249 250 neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
250
251 send_skb_packet(skb, batman_if, dstaddr);
252 251
252 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
253 goto out; 253 goto out;
254 254
255unlock: 255unlock:
256 spin_unlock_bh(&bat_priv->orig_hash_lock); 256 rcu_read_unlock();
257dst_unreach: 257dst_unreach:
258 icmp_packet->msg_type = DESTINATION_UNREACHABLE; 258 icmp_packet->msg_type = DESTINATION_UNREACHABLE;
259 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 259 bat_socket_add_packet(socket_client, icmp_packet, packet_len);
260free_skb: 260free_skb:
261 kfree_skb(skb); 261 kfree_skb(skb);
262out: 262out:
263 if (neigh_node)
264 neigh_node_free_ref(neigh_node);
265 if (orig_node)
266 orig_node_free_ref(orig_node);
263 return len; 267 return len;
264} 268}
265 269
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 06d956c91c27..709b33bbdf43 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -33,7 +33,7 @@
33#include "vis.h" 33#include "vis.h"
34#include "hash.h" 34#include "hash.h"
35 35
36struct list_head if_list; 36struct list_head hardif_list;
37 37
38unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 38unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
39 39
@@ -41,7 +41,7 @@ struct workqueue_struct *bat_event_workqueue;
41 41
42static int __init batman_init(void) 42static int __init batman_init(void)
43{ 43{
44 INIT_LIST_HEAD(&if_list); 44 INIT_LIST_HEAD(&hardif_list);
45 45
46 /* the name should not be longer than 10 chars - see 46 /* the name should not be longer than 10 chars - see
47 * http://lwn.net/Articles/23634/ */ 47 * http://lwn.net/Articles/23634/ */
@@ -79,7 +79,6 @@ int mesh_init(struct net_device *soft_iface)
79{ 79{
80 struct bat_priv *bat_priv = netdev_priv(soft_iface); 80 struct bat_priv *bat_priv = netdev_priv(soft_iface);
81 81
82 spin_lock_init(&bat_priv->orig_hash_lock);
83 spin_lock_init(&bat_priv->forw_bat_list_lock); 82 spin_lock_init(&bat_priv->forw_bat_list_lock);
84 spin_lock_init(&bat_priv->forw_bcast_list_lock); 83 spin_lock_init(&bat_priv->forw_bcast_list_lock);
85 spin_lock_init(&bat_priv->hna_lhash_lock); 84 spin_lock_init(&bat_priv->hna_lhash_lock);
@@ -154,14 +153,14 @@ void dec_module_count(void)
154 153
155int is_my_mac(uint8_t *addr) 154int is_my_mac(uint8_t *addr)
156{ 155{
157 struct batman_if *batman_if; 156 struct hard_iface *hard_iface;
158 157
159 rcu_read_lock(); 158 rcu_read_lock();
160 list_for_each_entry_rcu(batman_if, &if_list, list) { 159 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
161 if (batman_if->if_status != IF_ACTIVE) 160 if (hard_iface->if_status != IF_ACTIVE)
162 continue; 161 continue;
163 162
164 if (compare_orig(batman_if->net_dev->dev_addr, addr)) { 163 if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
165 rcu_read_unlock(); 164 rcu_read_unlock();
166 return 1; 165 return 1;
167 } 166 }
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index e235d7bbe045..dc248697de71 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -122,7 +122,7 @@
122#define REVISION_VERSION_STR " "REVISION_VERSION 122#define REVISION_VERSION_STR " "REVISION_VERSION
123#endif 123#endif
124 124
125extern struct list_head if_list; 125extern struct list_head hardif_list;
126 126
127extern unsigned char broadcast_addr[]; 127extern unsigned char broadcast_addr[];
128extern struct workqueue_struct *bat_event_workqueue; 128extern struct workqueue_struct *bat_event_workqueue;
@@ -165,4 +165,14 @@ static inline void bat_dbg(char type __always_unused,
165 pr_err("%s: " fmt, _netdev->name, ## arg); \ 165 pr_err("%s: " fmt, _netdev->name, ## arg); \
166 } while (0) 166 } while (0)
167 167
168/**
169 * returns 1 if they are the same ethernet addr
170 *
171 * note: can't use compare_ether_addr() as it requires aligned memory
172 */
173static inline int compare_eth(void *data1, void *data2)
174{
175 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
176}
177
168#endif /* _NET_BATMAN_ADV_MAIN_H_ */ 178#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 54863c9385de..0b9133022d2d 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -44,24 +44,36 @@ int originator_init(struct bat_priv *bat_priv)
44 if (bat_priv->orig_hash) 44 if (bat_priv->orig_hash)
45 return 1; 45 return 1;
46 46
47 spin_lock_bh(&bat_priv->orig_hash_lock);
48 bat_priv->orig_hash = hash_new(1024); 47 bat_priv->orig_hash = hash_new(1024);
49 48
50 if (!bat_priv->orig_hash) 49 if (!bat_priv->orig_hash)
51 goto err; 50 goto err;
52 51
53 spin_unlock_bh(&bat_priv->orig_hash_lock);
54 start_purge_timer(bat_priv); 52 start_purge_timer(bat_priv);
55 return 1; 53 return 1;
56 54
57err: 55err:
58 spin_unlock_bh(&bat_priv->orig_hash_lock);
59 return 0; 56 return 0;
60} 57}
61 58
62struct neigh_node * 59static void neigh_node_free_rcu(struct rcu_head *rcu)
63create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node, 60{
64 uint8_t *neigh, struct batman_if *if_incoming) 61 struct neigh_node *neigh_node;
62
63 neigh_node = container_of(rcu, struct neigh_node, rcu);
64 kfree(neigh_node);
65}
66
67void neigh_node_free_ref(struct neigh_node *neigh_node)
68{
69 if (atomic_dec_and_test(&neigh_node->refcount))
70 call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
71}
72
73struct neigh_node *create_neighbor(struct orig_node *orig_node,
74 struct orig_node *orig_neigh_node,
75 uint8_t *neigh,
76 struct hard_iface *if_incoming)
65{ 77{
66 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 78 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
67 struct neigh_node *neigh_node; 79 struct neigh_node *neigh_node;
@@ -73,50 +85,94 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
73 if (!neigh_node) 85 if (!neigh_node)
74 return NULL; 86 return NULL;
75 87
76 INIT_LIST_HEAD(&neigh_node->list); 88 INIT_HLIST_NODE(&neigh_node->list);
89 INIT_LIST_HEAD(&neigh_node->bonding_list);
77 90
78 memcpy(neigh_node->addr, neigh, ETH_ALEN); 91 memcpy(neigh_node->addr, neigh, ETH_ALEN);
79 neigh_node->orig_node = orig_neigh_node; 92 neigh_node->orig_node = orig_neigh_node;
80 neigh_node->if_incoming = if_incoming; 93 neigh_node->if_incoming = if_incoming;
81 94
82 list_add_tail(&neigh_node->list, &orig_node->neigh_list); 95 /* extra reference for return */
96 atomic_set(&neigh_node->refcount, 2);
97
98 spin_lock_bh(&orig_node->neigh_list_lock);
99 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
100 spin_unlock_bh(&orig_node->neigh_list_lock);
83 return neigh_node; 101 return neigh_node;
84} 102}
85 103
86static void free_orig_node(void *data, void *arg) 104static void orig_node_free_rcu(struct rcu_head *rcu)
87{ 105{
88 struct list_head *list_pos, *list_pos_tmp; 106 struct hlist_node *node, *node_tmp;
89 struct neigh_node *neigh_node; 107 struct neigh_node *neigh_node, *tmp_neigh_node;
90 struct orig_node *orig_node = (struct orig_node *)data; 108 struct orig_node *orig_node;
91 struct bat_priv *bat_priv = (struct bat_priv *)arg;
92 109
93 /* for all neighbors towards this originator ... */ 110 orig_node = container_of(rcu, struct orig_node, rcu);
94 list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) { 111
95 neigh_node = list_entry(list_pos, struct neigh_node, list); 112 spin_lock_bh(&orig_node->neigh_list_lock);
113
114 /* for all bonding members ... */
115 list_for_each_entry_safe(neigh_node, tmp_neigh_node,
116 &orig_node->bond_list, bonding_list) {
117 list_del_rcu(&neigh_node->bonding_list);
118 neigh_node_free_ref(neigh_node);
119 }
96 120
97 list_del(list_pos); 121 /* for all neighbors towards this originator ... */
98 kfree(neigh_node); 122 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
123 &orig_node->neigh_list, list) {
124 hlist_del_rcu(&neigh_node->list);
125 neigh_node_free_ref(neigh_node);
99 } 126 }
100 127
128 spin_unlock_bh(&orig_node->neigh_list_lock);
129
101 frag_list_free(&orig_node->frag_list); 130 frag_list_free(&orig_node->frag_list);
102 hna_global_del_orig(bat_priv, orig_node, "originator timed out"); 131 hna_global_del_orig(orig_node->bat_priv, orig_node,
132 "originator timed out");
103 133
104 kfree(orig_node->bcast_own); 134 kfree(orig_node->bcast_own);
105 kfree(orig_node->bcast_own_sum); 135 kfree(orig_node->bcast_own_sum);
106 kfree(orig_node); 136 kfree(orig_node);
107} 137}
108 138
139void orig_node_free_ref(struct orig_node *orig_node)
140{
141 if (atomic_dec_and_test(&orig_node->refcount))
142 call_rcu(&orig_node->rcu, orig_node_free_rcu);
143}
144
109void originator_free(struct bat_priv *bat_priv) 145void originator_free(struct bat_priv *bat_priv)
110{ 146{
111 if (!bat_priv->orig_hash) 147 struct hashtable_t *hash = bat_priv->orig_hash;
148 struct hlist_node *node, *node_tmp;
149 struct hlist_head *head;
150 spinlock_t *list_lock; /* spinlock to protect write access */
151 struct orig_node *orig_node;
152 int i;
153
154 if (!hash)
112 return; 155 return;
113 156
114 cancel_delayed_work_sync(&bat_priv->orig_work); 157 cancel_delayed_work_sync(&bat_priv->orig_work);
115 158
116 spin_lock_bh(&bat_priv->orig_hash_lock);
117 hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
118 bat_priv->orig_hash = NULL; 159 bat_priv->orig_hash = NULL;
119 spin_unlock_bh(&bat_priv->orig_hash_lock); 160
161 for (i = 0; i < hash->size; i++) {
162 head = &hash->table[i];
163 list_lock = &hash->list_locks[i];
164
165 spin_lock_bh(list_lock);
166 hlist_for_each_entry_safe(orig_node, node, node_tmp,
167 head, hash_entry) {
168
169 hlist_del_rcu(node);
170 orig_node_free_ref(orig_node);
171 }
172 spin_unlock_bh(list_lock);
173 }
174
175 hash_destroy(hash);
120} 176}
121 177
122/* this function finds or creates an originator entry for the given 178/* this function finds or creates an originator entry for the given
@@ -127,10 +183,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
127 int size; 183 int size;
128 int hash_added; 184 int hash_added;
129 185
130 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, 186 orig_node = orig_hash_find(bat_priv, addr);
131 compare_orig, choose_orig,
132 addr));
133
134 if (orig_node) 187 if (orig_node)
135 return orig_node; 188 return orig_node;
136 189
@@ -141,8 +194,16 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
141 if (!orig_node) 194 if (!orig_node)
142 return NULL; 195 return NULL;
143 196
144 INIT_LIST_HEAD(&orig_node->neigh_list); 197 INIT_HLIST_HEAD(&orig_node->neigh_list);
198 INIT_LIST_HEAD(&orig_node->bond_list);
199 spin_lock_init(&orig_node->ogm_cnt_lock);
200 spin_lock_init(&orig_node->bcast_seqno_lock);
201 spin_lock_init(&orig_node->neigh_list_lock);
202
203 /* extra reference for return */
204 atomic_set(&orig_node->refcount, 2);
145 205
206 orig_node->bat_priv = bat_priv;
146 memcpy(orig_node->orig, addr, ETH_ALEN); 207 memcpy(orig_node->orig, addr, ETH_ALEN);
147 orig_node->router = NULL; 208 orig_node->router = NULL;
148 orig_node->hna_buff = NULL; 209 orig_node->hna_buff = NULL;
@@ -151,6 +212,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
151 orig_node->batman_seqno_reset = jiffies - 1 212 orig_node->batman_seqno_reset = jiffies - 1
152 - msecs_to_jiffies(RESET_PROTECTION_MS); 213 - msecs_to_jiffies(RESET_PROTECTION_MS);
153 214
215 atomic_set(&orig_node->bond_candidates, 0);
216
154 size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS; 217 size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
155 218
156 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); 219 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
@@ -166,8 +229,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
166 if (!orig_node->bcast_own_sum) 229 if (!orig_node->bcast_own_sum)
167 goto free_bcast_own; 230 goto free_bcast_own;
168 231
169 hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig, 232 hash_added = hash_add(bat_priv->orig_hash, compare_orig,
170 orig_node); 233 choose_orig, orig_node, &orig_node->hash_entry);
171 if (hash_added < 0) 234 if (hash_added < 0)
172 goto free_bcast_own_sum; 235 goto free_bcast_own_sum;
173 236
@@ -185,23 +248,30 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
185 struct orig_node *orig_node, 248 struct orig_node *orig_node,
186 struct neigh_node **best_neigh_node) 249 struct neigh_node **best_neigh_node)
187{ 250{
188 struct list_head *list_pos, *list_pos_tmp; 251 struct hlist_node *node, *node_tmp;
189 struct neigh_node *neigh_node; 252 struct neigh_node *neigh_node;
190 bool neigh_purged = false; 253 bool neigh_purged = false;
191 254
192 *best_neigh_node = NULL; 255 *best_neigh_node = NULL;
193 256
257 spin_lock_bh(&orig_node->neigh_list_lock);
258
194 /* for all neighbors towards this originator ... */ 259 /* for all neighbors towards this originator ... */
195 list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) { 260 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
196 neigh_node = list_entry(list_pos, struct neigh_node, list); 261 &orig_node->neigh_list, list) {
197 262
198 if ((time_after(jiffies, 263 if ((time_after(jiffies,
199 neigh_node->last_valid + PURGE_TIMEOUT * HZ)) || 264 neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
200 (neigh_node->if_incoming->if_status == IF_INACTIVE) || 265 (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
266 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
201 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { 267 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
202 268
203 if (neigh_node->if_incoming->if_status == 269 if ((neigh_node->if_incoming->if_status ==
204 IF_TO_BE_REMOVED) 270 IF_INACTIVE) ||
271 (neigh_node->if_incoming->if_status ==
272 IF_NOT_IN_USE) ||
273 (neigh_node->if_incoming->if_status ==
274 IF_TO_BE_REMOVED))
205 bat_dbg(DBG_BATMAN, bat_priv, 275 bat_dbg(DBG_BATMAN, bat_priv,
206 "neighbor purge: originator %pM, " 276 "neighbor purge: originator %pM, "
207 "neighbor: %pM, iface: %s\n", 277 "neighbor: %pM, iface: %s\n",
@@ -215,14 +285,18 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
215 (neigh_node->last_valid / HZ)); 285 (neigh_node->last_valid / HZ));
216 286
217 neigh_purged = true; 287 neigh_purged = true;
218 list_del(list_pos); 288
219 kfree(neigh_node); 289 hlist_del_rcu(&neigh_node->list);
290 bonding_candidate_del(orig_node, neigh_node);
291 neigh_node_free_ref(neigh_node);
220 } else { 292 } else {
221 if ((!*best_neigh_node) || 293 if ((!*best_neigh_node) ||
222 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) 294 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
223 *best_neigh_node = neigh_node; 295 *best_neigh_node = neigh_node;
224 } 296 }
225 } 297 }
298
299 spin_unlock_bh(&orig_node->neigh_list_lock);
226 return neigh_purged; 300 return neigh_purged;
227} 301}
228 302
@@ -245,9 +319,6 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
245 best_neigh_node, 319 best_neigh_node,
246 orig_node->hna_buff, 320 orig_node->hna_buff,
247 orig_node->hna_buff_len); 321 orig_node->hna_buff_len);
248 /* update bonding candidates, we could have lost
249 * some candidates. */
250 update_bonding_candidates(orig_node);
251 } 322 }
252 } 323 }
253 324
@@ -257,40 +328,38 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
257static void _purge_orig(struct bat_priv *bat_priv) 328static void _purge_orig(struct bat_priv *bat_priv)
258{ 329{
259 struct hashtable_t *hash = bat_priv->orig_hash; 330 struct hashtable_t *hash = bat_priv->orig_hash;
260 struct hlist_node *walk, *safe; 331 struct hlist_node *node, *node_tmp;
261 struct hlist_head *head; 332 struct hlist_head *head;
262 struct element_t *bucket; 333 spinlock_t *list_lock; /* spinlock to protect write access */
263 struct orig_node *orig_node; 334 struct orig_node *orig_node;
264 int i; 335 int i;
265 336
266 if (!hash) 337 if (!hash)
267 return; 338 return;
268 339
269 spin_lock_bh(&bat_priv->orig_hash_lock);
270
271 /* for all origins... */ 340 /* for all origins... */
272 for (i = 0; i < hash->size; i++) { 341 for (i = 0; i < hash->size; i++) {
273 head = &hash->table[i]; 342 head = &hash->table[i];
343 list_lock = &hash->list_locks[i];
274 344
275 hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) { 345 spin_lock_bh(list_lock);
276 orig_node = bucket->data; 346 hlist_for_each_entry_safe(orig_node, node, node_tmp,
277 347 head, hash_entry) {
278 if (purge_orig_node(bat_priv, orig_node)) { 348 if (purge_orig_node(bat_priv, orig_node)) {
279 if (orig_node->gw_flags) 349 if (orig_node->gw_flags)
280 gw_node_delete(bat_priv, orig_node); 350 gw_node_delete(bat_priv, orig_node);
281 hlist_del(walk); 351 hlist_del_rcu(node);
282 kfree(bucket); 352 orig_node_free_ref(orig_node);
283 free_orig_node(orig_node, bat_priv); 353 continue;
284 } 354 }
285 355
286 if (time_after(jiffies, orig_node->last_frag_packet + 356 if (time_after(jiffies, orig_node->last_frag_packet +
287 msecs_to_jiffies(FRAG_TIMEOUT))) 357 msecs_to_jiffies(FRAG_TIMEOUT)))
288 frag_list_free(&orig_node->frag_list); 358 frag_list_free(&orig_node->frag_list);
289 } 359 }
360 spin_unlock_bh(list_lock);
290 } 361 }
291 362
292 spin_unlock_bh(&bat_priv->orig_hash_lock);
293
294 gw_node_purge(bat_priv); 363 gw_node_purge(bat_priv);
295 gw_election(bat_priv); 364 gw_election(bat_priv);
296 365
@@ -318,9 +387,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
318 struct net_device *net_dev = (struct net_device *)seq->private; 387 struct net_device *net_dev = (struct net_device *)seq->private;
319 struct bat_priv *bat_priv = netdev_priv(net_dev); 388 struct bat_priv *bat_priv = netdev_priv(net_dev);
320 struct hashtable_t *hash = bat_priv->orig_hash; 389 struct hashtable_t *hash = bat_priv->orig_hash;
321 struct hlist_node *walk; 390 struct hlist_node *node, *node_tmp;
322 struct hlist_head *head; 391 struct hlist_head *head;
323 struct element_t *bucket;
324 struct orig_node *orig_node; 392 struct orig_node *orig_node;
325 struct neigh_node *neigh_node; 393 struct neigh_node *neigh_node;
326 int batman_count = 0; 394 int batman_count = 0;
@@ -348,14 +416,11 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
348 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", 416 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
349 "outgoingIF", "Potential nexthops"); 417 "outgoingIF", "Potential nexthops");
350 418
351 spin_lock_bh(&bat_priv->orig_hash_lock);
352
353 for (i = 0; i < hash->size; i++) { 419 for (i = 0; i < hash->size; i++) {
354 head = &hash->table[i]; 420 head = &hash->table[i];
355 421
356 hlist_for_each_entry(bucket, walk, head, hlist) { 422 rcu_read_lock();
357 orig_node = bucket->data; 423 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
358
359 if (!orig_node->router) 424 if (!orig_node->router)
360 continue; 425 continue;
361 426
@@ -374,8 +439,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
374 neigh_node->addr, 439 neigh_node->addr,
375 neigh_node->if_incoming->net_dev->name); 440 neigh_node->if_incoming->net_dev->name);
376 441
377 list_for_each_entry(neigh_node, &orig_node->neigh_list, 442 hlist_for_each_entry_rcu(neigh_node, node_tmp,
378 list) { 443 &orig_node->neigh_list, list) {
379 seq_printf(seq, " %pM (%3i)", neigh_node->addr, 444 seq_printf(seq, " %pM (%3i)", neigh_node->addr,
380 neigh_node->tq_avg); 445 neigh_node->tq_avg);
381 } 446 }
@@ -383,10 +448,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
383 seq_printf(seq, "\n"); 448 seq_printf(seq, "\n");
384 batman_count++; 449 batman_count++;
385 } 450 }
451 rcu_read_unlock();
386 } 452 }
387 453
388 spin_unlock_bh(&bat_priv->orig_hash_lock);
389
390 if ((batman_count == 0)) 454 if ((batman_count == 0))
391 seq_printf(seq, "No batman nodes in range ...\n"); 455 seq_printf(seq, "No batman nodes in range ...\n");
392 456
@@ -423,36 +487,36 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
423 return 0; 487 return 0;
424} 488}
425 489
426int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) 490int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
427{ 491{
428 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); 492 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
429 struct hashtable_t *hash = bat_priv->orig_hash; 493 struct hashtable_t *hash = bat_priv->orig_hash;
430 struct hlist_node *walk; 494 struct hlist_node *node;
431 struct hlist_head *head; 495 struct hlist_head *head;
432 struct element_t *bucket;
433 struct orig_node *orig_node; 496 struct orig_node *orig_node;
434 int i; 497 int i, ret;
435 498
436 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 499 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
437 * if_num */ 500 * if_num */
438 spin_lock_bh(&bat_priv->orig_hash_lock);
439
440 for (i = 0; i < hash->size; i++) { 501 for (i = 0; i < hash->size; i++) {
441 head = &hash->table[i]; 502 head = &hash->table[i];
442 503
443 hlist_for_each_entry(bucket, walk, head, hlist) { 504 rcu_read_lock();
444 orig_node = bucket->data; 505 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
506 spin_lock_bh(&orig_node->ogm_cnt_lock);
507 ret = orig_node_add_if(orig_node, max_if_num);
508 spin_unlock_bh(&orig_node->ogm_cnt_lock);
445 509
446 if (orig_node_add_if(orig_node, max_if_num) == -1) 510 if (ret == -1)
447 goto err; 511 goto err;
448 } 512 }
513 rcu_read_unlock();
449 } 514 }
450 515
451 spin_unlock_bh(&bat_priv->orig_hash_lock);
452 return 0; 516 return 0;
453 517
454err: 518err:
455 spin_unlock_bh(&bat_priv->orig_hash_lock); 519 rcu_read_unlock();
456 return -ENOMEM; 520 return -ENOMEM;
457} 521}
458 522
@@ -508,57 +572,55 @@ free_own_sum:
508 return 0; 572 return 0;
509} 573}
510 574
511int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) 575int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
512{ 576{
513 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); 577 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
514 struct hashtable_t *hash = bat_priv->orig_hash; 578 struct hashtable_t *hash = bat_priv->orig_hash;
515 struct hlist_node *walk; 579 struct hlist_node *node;
516 struct hlist_head *head; 580 struct hlist_head *head;
517 struct element_t *bucket; 581 struct hard_iface *hard_iface_tmp;
518 struct batman_if *batman_if_tmp;
519 struct orig_node *orig_node; 582 struct orig_node *orig_node;
520 int i, ret; 583 int i, ret;
521 584
522 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 585 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
523 * if_num */ 586 * if_num */
524 spin_lock_bh(&bat_priv->orig_hash_lock);
525
526 for (i = 0; i < hash->size; i++) { 587 for (i = 0; i < hash->size; i++) {
527 head = &hash->table[i]; 588 head = &hash->table[i];
528 589
529 hlist_for_each_entry(bucket, walk, head, hlist) { 590 rcu_read_lock();
530 orig_node = bucket->data; 591 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
531 592 spin_lock_bh(&orig_node->ogm_cnt_lock);
532 ret = orig_node_del_if(orig_node, max_if_num, 593 ret = orig_node_del_if(orig_node, max_if_num,
533 batman_if->if_num); 594 hard_iface->if_num);
595 spin_unlock_bh(&orig_node->ogm_cnt_lock);
534 596
535 if (ret == -1) 597 if (ret == -1)
536 goto err; 598 goto err;
537 } 599 }
600 rcu_read_unlock();
538 } 601 }
539 602
540 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ 603 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
541 rcu_read_lock(); 604 rcu_read_lock();
542 list_for_each_entry_rcu(batman_if_tmp, &if_list, list) { 605 list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
543 if (batman_if_tmp->if_status == IF_NOT_IN_USE) 606 if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
544 continue; 607 continue;
545 608
546 if (batman_if == batman_if_tmp) 609 if (hard_iface == hard_iface_tmp)
547 continue; 610 continue;
548 611
549 if (batman_if->soft_iface != batman_if_tmp->soft_iface) 612 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
550 continue; 613 continue;
551 614
552 if (batman_if_tmp->if_num > batman_if->if_num) 615 if (hard_iface_tmp->if_num > hard_iface->if_num)
553 batman_if_tmp->if_num--; 616 hard_iface_tmp->if_num--;
554 } 617 }
555 rcu_read_unlock(); 618 rcu_read_unlock();
556 619
557 batman_if->if_num = -1; 620 hard_iface->if_num = -1;
558 spin_unlock_bh(&bat_priv->orig_hash_lock);
559 return 0; 621 return 0;
560 622
561err: 623err:
562 spin_unlock_bh(&bat_priv->orig_hash_lock); 624 rcu_read_unlock();
563 return -ENOMEM; 625 return -ENOMEM;
564} 626}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 8019fbddffd0..5cc011057da1 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -22,21 +22,28 @@
22#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_ 22#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
23#define _NET_BATMAN_ADV_ORIGINATOR_H_ 23#define _NET_BATMAN_ADV_ORIGINATOR_H_
24 24
25#include "hash.h"
26
25int originator_init(struct bat_priv *bat_priv); 27int originator_init(struct bat_priv *bat_priv);
26void originator_free(struct bat_priv *bat_priv); 28void originator_free(struct bat_priv *bat_priv);
27void purge_orig_ref(struct bat_priv *bat_priv); 29void purge_orig_ref(struct bat_priv *bat_priv);
30void orig_node_free_ref(struct orig_node *orig_node);
28struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr); 31struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
29struct neigh_node * 32struct neigh_node *create_neighbor(struct orig_node *orig_node,
30create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node, 33 struct orig_node *orig_neigh_node,
31 uint8_t *neigh, struct batman_if *if_incoming); 34 uint8_t *neigh,
35 struct hard_iface *if_incoming);
36void neigh_node_free_ref(struct neigh_node *neigh_node);
32int orig_seq_print_text(struct seq_file *seq, void *offset); 37int orig_seq_print_text(struct seq_file *seq, void *offset);
33int orig_hash_add_if(struct batman_if *batman_if, int max_if_num); 38int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
34int orig_hash_del_if(struct batman_if *batman_if, int max_if_num); 39int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
35 40
36 41
37/* returns 1 if they are the same originator */ 42/* returns 1 if they are the same originator */
38static inline int compare_orig(void *data1, void *data2) 43static inline int compare_orig(struct hlist_node *node, void *data2)
39{ 44{
45 void *data1 = container_of(node, struct orig_node, hash_entry);
46
40 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
41} 48}
42 49
@@ -61,4 +68,35 @@ static inline int choose_orig(void *data, int32_t size)
61 return hash % size; 68 return hash % size;
62} 69}
63 70
71static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
72 void *data)
73{
74 struct hashtable_t *hash = bat_priv->orig_hash;
75 struct hlist_head *head;
76 struct hlist_node *node;
77 struct orig_node *orig_node, *orig_node_tmp = NULL;
78 int index;
79
80 if (!hash)
81 return NULL;
82
83 index = choose_orig(data, hash->size);
84 head = &hash->table[index];
85
86 rcu_read_lock();
87 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
88 if (!compare_eth(orig_node, data))
89 continue;
90
91 if (!atomic_inc_not_zero(&orig_node->refcount))
92 continue;
93
94 orig_node_tmp = orig_node;
95 break;
96 }
97 rcu_read_unlock();
98
99 return orig_node_tmp;
100}
101
64#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */ 102#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 827414067e46..c172f5d0e05a 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -35,35 +35,33 @@
35#include "gateway_client.h" 35#include "gateway_client.h"
36#include "unicast.h" 36#include "unicast.h"
37 37
38void slide_own_bcast_window(struct batman_if *batman_if) 38void slide_own_bcast_window(struct hard_iface *hard_iface)
39{ 39{
40 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); 40 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
41 struct hashtable_t *hash = bat_priv->orig_hash; 41 struct hashtable_t *hash = bat_priv->orig_hash;
42 struct hlist_node *walk; 42 struct hlist_node *node;
43 struct hlist_head *head; 43 struct hlist_head *head;
44 struct element_t *bucket;
45 struct orig_node *orig_node; 44 struct orig_node *orig_node;
46 unsigned long *word; 45 unsigned long *word;
47 int i; 46 int i;
48 size_t word_index; 47 size_t word_index;
49 48
50 spin_lock_bh(&bat_priv->orig_hash_lock);
51
52 for (i = 0; i < hash->size; i++) { 49 for (i = 0; i < hash->size; i++) {
53 head = &hash->table[i]; 50 head = &hash->table[i];
54 51
55 hlist_for_each_entry(bucket, walk, head, hlist) { 52 rcu_read_lock();
56 orig_node = bucket->data; 53 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
57 word_index = batman_if->if_num * NUM_WORDS; 54 spin_lock_bh(&orig_node->ogm_cnt_lock);
55 word_index = hard_iface->if_num * NUM_WORDS;
58 word = &(orig_node->bcast_own[word_index]); 56 word = &(orig_node->bcast_own[word_index]);
59 57
60 bit_get_packet(bat_priv, word, 1, 0); 58 bit_get_packet(bat_priv, word, 1, 0);
61 orig_node->bcast_own_sum[batman_if->if_num] = 59 orig_node->bcast_own_sum[hard_iface->if_num] =
62 bit_packet_count(word); 60 bit_packet_count(word);
61 spin_unlock_bh(&orig_node->ogm_cnt_lock);
63 } 62 }
63 rcu_read_unlock();
64 } 64 }
65
66 spin_unlock_bh(&bat_priv->orig_hash_lock);
67} 65}
68 66
69static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, 67static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
@@ -89,6 +87,8 @@ static void update_route(struct bat_priv *bat_priv,
89 struct neigh_node *neigh_node, 87 struct neigh_node *neigh_node,
90 unsigned char *hna_buff, int hna_buff_len) 88 unsigned char *hna_buff, int hna_buff_len)
91{ 89{
90 struct neigh_node *neigh_node_tmp;
91
92 /* route deleted */ 92 /* route deleted */
93 if ((orig_node->router) && (!neigh_node)) { 93 if ((orig_node->router) && (!neigh_node)) {
94 94
@@ -115,7 +115,12 @@ static void update_route(struct bat_priv *bat_priv,
115 orig_node->router->addr); 115 orig_node->router->addr);
116 } 116 }
117 117
118 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
119 neigh_node = NULL;
120 neigh_node_tmp = orig_node->router;
118 orig_node->router = neigh_node; 121 orig_node->router = neigh_node;
122 if (neigh_node_tmp)
123 neigh_node_free_ref(neigh_node_tmp);
119} 124}
120 125
121 126
@@ -138,73 +143,93 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
138static int is_bidirectional_neigh(struct orig_node *orig_node, 143static int is_bidirectional_neigh(struct orig_node *orig_node,
139 struct orig_node *orig_neigh_node, 144 struct orig_node *orig_neigh_node,
140 struct batman_packet *batman_packet, 145 struct batman_packet *batman_packet,
141 struct batman_if *if_incoming) 146 struct hard_iface *if_incoming)
142{ 147{
143 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 148 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
144 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 149 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
150 struct hlist_node *node;
145 unsigned char total_count; 151 unsigned char total_count;
152 uint8_t orig_eq_count, neigh_rq_count, tq_own;
153 int tq_asym_penalty, ret = 0;
146 154
147 if (orig_node == orig_neigh_node) { 155 if (orig_node == orig_neigh_node) {
148 list_for_each_entry(tmp_neigh_node, 156 rcu_read_lock();
149 &orig_node->neigh_list, 157 hlist_for_each_entry_rcu(tmp_neigh_node, node,
150 list) { 158 &orig_node->neigh_list, list) {
151 159
152 if (compare_orig(tmp_neigh_node->addr, 160 if (!compare_eth(tmp_neigh_node->addr,
153 orig_neigh_node->orig) && 161 orig_neigh_node->orig))
154 (tmp_neigh_node->if_incoming == if_incoming)) 162 continue;
155 neigh_node = tmp_neigh_node; 163
164 if (tmp_neigh_node->if_incoming != if_incoming)
165 continue;
166
167 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
168 continue;
169
170 neigh_node = tmp_neigh_node;
156 } 171 }
172 rcu_read_unlock();
157 173
158 if (!neigh_node) 174 if (!neigh_node)
159 neigh_node = create_neighbor(orig_node, 175 neigh_node = create_neighbor(orig_node,
160 orig_neigh_node, 176 orig_neigh_node,
161 orig_neigh_node->orig, 177 orig_neigh_node->orig,
162 if_incoming); 178 if_incoming);
163 /* create_neighbor failed, return 0 */
164 if (!neigh_node) 179 if (!neigh_node)
165 return 0; 180 goto out;
166 181
167 neigh_node->last_valid = jiffies; 182 neigh_node->last_valid = jiffies;
168 } else { 183 } else {
169 /* find packet count of corresponding one hop neighbor */ 184 /* find packet count of corresponding one hop neighbor */
170 list_for_each_entry(tmp_neigh_node, 185 rcu_read_lock();
171 &orig_neigh_node->neigh_list, list) { 186 hlist_for_each_entry_rcu(tmp_neigh_node, node,
187 &orig_neigh_node->neigh_list, list) {
188
189 if (!compare_eth(tmp_neigh_node->addr,
190 orig_neigh_node->orig))
191 continue;
192
193 if (tmp_neigh_node->if_incoming != if_incoming)
194 continue;
172 195
173 if (compare_orig(tmp_neigh_node->addr, 196 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
174 orig_neigh_node->orig) && 197 continue;
175 (tmp_neigh_node->if_incoming == if_incoming)) 198
176 neigh_node = tmp_neigh_node; 199 neigh_node = tmp_neigh_node;
177 } 200 }
201 rcu_read_unlock();
178 202
179 if (!neigh_node) 203 if (!neigh_node)
180 neigh_node = create_neighbor(orig_neigh_node, 204 neigh_node = create_neighbor(orig_neigh_node,
181 orig_neigh_node, 205 orig_neigh_node,
182 orig_neigh_node->orig, 206 orig_neigh_node->orig,
183 if_incoming); 207 if_incoming);
184 /* create_neighbor failed, return 0 */
185 if (!neigh_node) 208 if (!neigh_node)
186 return 0; 209 goto out;
187 } 210 }
188 211
189 orig_node->last_valid = jiffies; 212 orig_node->last_valid = jiffies;
190 213
214 spin_lock_bh(&orig_node->ogm_cnt_lock);
215 orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
216 neigh_rq_count = neigh_node->real_packet_count;
217 spin_unlock_bh(&orig_node->ogm_cnt_lock);
218
191 /* pay attention to not get a value bigger than 100 % */ 219 /* pay attention to not get a value bigger than 100 % */
192 total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] > 220 total_count = (orig_eq_count > neigh_rq_count ?
193 neigh_node->real_packet_count ? 221 neigh_rq_count : orig_eq_count);
194 neigh_node->real_packet_count :
195 orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
196 222
197 /* if we have too few packets (too less data) we set tq_own to zero */ 223 /* if we have too few packets (too less data) we set tq_own to zero */
198 /* if we receive too few packets it is not considered bidirectional */ 224 /* if we receive too few packets it is not considered bidirectional */
199 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || 225 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
200 (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) 226 (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
201 orig_neigh_node->tq_own = 0; 227 tq_own = 0;
202 else 228 else
203 /* neigh_node->real_packet_count is never zero as we 229 /* neigh_node->real_packet_count is never zero as we
204 * only purge old information when getting new 230 * only purge old information when getting new
205 * information */ 231 * information */
206 orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) / 232 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
207 neigh_node->real_packet_count;
208 233
209 /* 234 /*
210 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does 235 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
@@ -212,20 +237,16 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
212 * punishes asymmetric links more. This will give a value 237 * punishes asymmetric links more. This will give a value
213 * between 0 and TQ_MAX_VALUE 238 * between 0 and TQ_MAX_VALUE
214 */ 239 */
215 orig_neigh_node->tq_asym_penalty = 240 tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
216 TQ_MAX_VALUE - 241 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
217 (TQ_MAX_VALUE * 242 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
218 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * 243 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
219 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * 244 (TQ_LOCAL_WINDOW_SIZE *
220 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) / 245 TQ_LOCAL_WINDOW_SIZE *
221 (TQ_LOCAL_WINDOW_SIZE * 246 TQ_LOCAL_WINDOW_SIZE);
222 TQ_LOCAL_WINDOW_SIZE * 247
223 TQ_LOCAL_WINDOW_SIZE); 248 batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
224 249 (TQ_MAX_VALUE * TQ_MAX_VALUE));
225 batman_packet->tq = ((batman_packet->tq *
226 orig_neigh_node->tq_own *
227 orig_neigh_node->tq_asym_penalty) /
228 (TQ_MAX_VALUE * TQ_MAX_VALUE));
229 250
230 bat_dbg(DBG_BATMAN, bat_priv, 251 bat_dbg(DBG_BATMAN, bat_priv,
231 "bidirectional: " 252 "bidirectional: "
@@ -233,34 +254,141 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
233 "real recv = %2i, local tq: %3i, asym_penalty: %3i, " 254 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
234 "total tq: %3i\n", 255 "total tq: %3i\n",
235 orig_node->orig, orig_neigh_node->orig, total_count, 256 orig_node->orig, orig_neigh_node->orig, total_count,
236 neigh_node->real_packet_count, orig_neigh_node->tq_own, 257 neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
237 orig_neigh_node->tq_asym_penalty, batman_packet->tq);
238 258
239 /* if link has the minimum required transmission quality 259 /* if link has the minimum required transmission quality
240 * consider it bidirectional */ 260 * consider it bidirectional */
241 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) 261 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
242 return 1; 262 ret = 1;
243 263
244 return 0; 264out:
265 if (neigh_node)
266 neigh_node_free_ref(neigh_node);
267 return ret;
268}
269
270/* caller must hold the neigh_list_lock */
271void bonding_candidate_del(struct orig_node *orig_node,
272 struct neigh_node *neigh_node)
273{
274 /* this neighbor is not part of our candidate list */
275 if (list_empty(&neigh_node->bonding_list))
276 goto out;
277
278 list_del_rcu(&neigh_node->bonding_list);
279 INIT_LIST_HEAD(&neigh_node->bonding_list);
280 neigh_node_free_ref(neigh_node);
281 atomic_dec(&orig_node->bond_candidates);
282
283out:
284 return;
285}
286
287static void bonding_candidate_add(struct orig_node *orig_node,
288 struct neigh_node *neigh_node)
289{
290 struct hlist_node *node;
291 struct neigh_node *tmp_neigh_node;
292 uint8_t best_tq, interference_candidate = 0;
293
294 spin_lock_bh(&orig_node->neigh_list_lock);
295
296 /* only consider if it has the same primary address ... */
297 if (!compare_eth(orig_node->orig,
298 neigh_node->orig_node->primary_addr))
299 goto candidate_del;
300
301 if (!orig_node->router)
302 goto candidate_del;
303
304 best_tq = orig_node->router->tq_avg;
305
306 /* ... and is good enough to be considered */
307 if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
308 goto candidate_del;
309
310 /**
311 * check if we have another candidate with the same mac address or
312 * interface. If we do, we won't select this candidate because of
313 * possible interference.
314 */
315 hlist_for_each_entry_rcu(tmp_neigh_node, node,
316 &orig_node->neigh_list, list) {
317
318 if (tmp_neigh_node == neigh_node)
319 continue;
320
321 /* we only care if the other candidate is even
322 * considered as candidate. */
323 if (list_empty(&tmp_neigh_node->bonding_list))
324 continue;
325
326 if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
327 (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
328 interference_candidate = 1;
329 break;
330 }
331 }
332
333 /* don't care further if it is an interference candidate */
334 if (interference_candidate)
335 goto candidate_del;
336
337 /* this neighbor already is part of our candidate list */
338 if (!list_empty(&neigh_node->bonding_list))
339 goto out;
340
341 if (!atomic_inc_not_zero(&neigh_node->refcount))
342 goto out;
343
344 list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
345 atomic_inc(&orig_node->bond_candidates);
346 goto out;
347
348candidate_del:
349 bonding_candidate_del(orig_node, neigh_node);
350
351out:
352 spin_unlock_bh(&orig_node->neigh_list_lock);
353 return;
354}
355
356/* copy primary address for bonding */
357static void bonding_save_primary(struct orig_node *orig_node,
358 struct orig_node *orig_neigh_node,
359 struct batman_packet *batman_packet)
360{
361 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
362 return;
363
364 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
245} 365}
246 366
247static void update_orig(struct bat_priv *bat_priv, 367static void update_orig(struct bat_priv *bat_priv,
248 struct orig_node *orig_node, 368 struct orig_node *orig_node,
249 struct ethhdr *ethhdr, 369 struct ethhdr *ethhdr,
250 struct batman_packet *batman_packet, 370 struct batman_packet *batman_packet,
251 struct batman_if *if_incoming, 371 struct hard_iface *if_incoming,
252 unsigned char *hna_buff, int hna_buff_len, 372 unsigned char *hna_buff, int hna_buff_len,
253 char is_duplicate) 373 char is_duplicate)
254{ 374{
255 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 375 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
376 struct orig_node *orig_node_tmp;
377 struct hlist_node *node;
256 int tmp_hna_buff_len; 378 int tmp_hna_buff_len;
379 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
257 380
258 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " 381 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
259 "Searching and updating originator entry of received packet\n"); 382 "Searching and updating originator entry of received packet\n");
260 383
261 list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { 384 rcu_read_lock();
262 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && 385 hlist_for_each_entry_rcu(tmp_neigh_node, node,
263 (tmp_neigh_node->if_incoming == if_incoming)) { 386 &orig_node->neigh_list, list) {
387 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
388 (tmp_neigh_node->if_incoming == if_incoming) &&
389 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
390 if (neigh_node)
391 neigh_node_free_ref(neigh_node);
264 neigh_node = tmp_neigh_node; 392 neigh_node = tmp_neigh_node;
265 continue; 393 continue;
266 } 394 }
@@ -279,16 +407,20 @@ static void update_orig(struct bat_priv *bat_priv,
279 407
280 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); 408 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
281 if (!orig_tmp) 409 if (!orig_tmp)
282 return; 410 goto unlock;
283 411
284 neigh_node = create_neighbor(orig_node, orig_tmp, 412 neigh_node = create_neighbor(orig_node, orig_tmp,
285 ethhdr->h_source, if_incoming); 413 ethhdr->h_source, if_incoming);
414
415 orig_node_free_ref(orig_tmp);
286 if (!neigh_node) 416 if (!neigh_node)
287 return; 417 goto unlock;
288 } else 418 } else
289 bat_dbg(DBG_BATMAN, bat_priv, 419 bat_dbg(DBG_BATMAN, bat_priv,
290 "Updating existing last-hop neighbor of originator\n"); 420 "Updating existing last-hop neighbor of originator\n");
291 421
422 rcu_read_unlock();
423
292 orig_node->flags = batman_packet->flags; 424 orig_node->flags = batman_packet->flags;
293 neigh_node->last_valid = jiffies; 425 neigh_node->last_valid = jiffies;
294 426
@@ -302,6 +434,8 @@ static void update_orig(struct bat_priv *bat_priv,
302 neigh_node->last_ttl = batman_packet->ttl; 434 neigh_node->last_ttl = batman_packet->ttl;
303 } 435 }
304 436
437 bonding_candidate_add(orig_node, neigh_node);
438
305 tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? 439 tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
306 batman_packet->num_hna * ETH_ALEN : hna_buff_len); 440 batman_packet->num_hna * ETH_ALEN : hna_buff_len);
307 441
@@ -318,10 +452,22 @@ static void update_orig(struct bat_priv *bat_priv,
318 /* if the TQ is the same and the link not more symetric we 452 /* if the TQ is the same and the link not more symetric we
319 * won't consider it either */ 453 * won't consider it either */
320 if ((orig_node->router) && 454 if ((orig_node->router) &&
321 ((neigh_node->tq_avg == orig_node->router->tq_avg) && 455 (neigh_node->tq_avg == orig_node->router->tq_avg)) {
322 (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num] 456 orig_node_tmp = orig_node->router->orig_node;
323 >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num]))) 457 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
324 goto update_hna; 458 bcast_own_sum_orig =
459 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
460 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
461
462 orig_node_tmp = neigh_node->orig_node;
463 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
464 bcast_own_sum_neigh =
465 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
466 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
467
468 if (bcast_own_sum_orig >= bcast_own_sum_neigh)
469 goto update_hna;
470 }
325 471
326 update_routes(bat_priv, orig_node, neigh_node, 472 update_routes(bat_priv, orig_node, neigh_node,
327 hna_buff, tmp_hna_buff_len); 473 hna_buff, tmp_hna_buff_len);
@@ -342,6 +488,14 @@ update_gw:
342 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && 488 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
343 (atomic_read(&bat_priv->gw_sel_class) > 2)) 489 (atomic_read(&bat_priv->gw_sel_class) > 2))
344 gw_check_election(bat_priv, orig_node); 490 gw_check_election(bat_priv, orig_node);
491
492 goto out;
493
494unlock:
495 rcu_read_unlock();
496out:
497 if (neigh_node)
498 neigh_node_free_ref(neigh_node);
345} 499}
346 500
347/* checks whether the host restarted and is in the protection time. 501/* checks whether the host restarted and is in the protection time.
@@ -379,34 +533,38 @@ static int window_protected(struct bat_priv *bat_priv,
379 */ 533 */
380static char count_real_packets(struct ethhdr *ethhdr, 534static char count_real_packets(struct ethhdr *ethhdr,
381 struct batman_packet *batman_packet, 535 struct batman_packet *batman_packet,
382 struct batman_if *if_incoming) 536 struct hard_iface *if_incoming)
383{ 537{
384 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 538 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
385 struct orig_node *orig_node; 539 struct orig_node *orig_node;
386 struct neigh_node *tmp_neigh_node; 540 struct neigh_node *tmp_neigh_node;
541 struct hlist_node *node;
387 char is_duplicate = 0; 542 char is_duplicate = 0;
388 int32_t seq_diff; 543 int32_t seq_diff;
389 int need_update = 0; 544 int need_update = 0;
390 int set_mark; 545 int set_mark, ret = -1;
391 546
392 orig_node = get_orig_node(bat_priv, batman_packet->orig); 547 orig_node = get_orig_node(bat_priv, batman_packet->orig);
393 if (!orig_node) 548 if (!orig_node)
394 return 0; 549 return 0;
395 550
551 spin_lock_bh(&orig_node->ogm_cnt_lock);
396 seq_diff = batman_packet->seqno - orig_node->last_real_seqno; 552 seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
397 553
398 /* signalize caller that the packet is to be dropped. */ 554 /* signalize caller that the packet is to be dropped. */
399 if (window_protected(bat_priv, seq_diff, 555 if (window_protected(bat_priv, seq_diff,
400 &orig_node->batman_seqno_reset)) 556 &orig_node->batman_seqno_reset))
401 return -1; 557 goto out;
402 558
403 list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { 559 rcu_read_lock();
560 hlist_for_each_entry_rcu(tmp_neigh_node, node,
561 &orig_node->neigh_list, list) {
404 562
405 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, 563 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
406 orig_node->last_real_seqno, 564 orig_node->last_real_seqno,
407 batman_packet->seqno); 565 batman_packet->seqno);
408 566
409 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && 567 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
410 (tmp_neigh_node->if_incoming == if_incoming)) 568 (tmp_neigh_node->if_incoming == if_incoming))
411 set_mark = 1; 569 set_mark = 1;
412 else 570 else
@@ -420,6 +578,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
420 tmp_neigh_node->real_packet_count = 578 tmp_neigh_node->real_packet_count =
421 bit_packet_count(tmp_neigh_node->real_bits); 579 bit_packet_count(tmp_neigh_node->real_bits);
422 } 580 }
581 rcu_read_unlock();
423 582
424 if (need_update) { 583 if (need_update) {
425 bat_dbg(DBG_BATMAN, bat_priv, 584 bat_dbg(DBG_BATMAN, bat_priv,
@@ -428,121 +587,21 @@ static char count_real_packets(struct ethhdr *ethhdr,
428 orig_node->last_real_seqno = batman_packet->seqno; 587 orig_node->last_real_seqno = batman_packet->seqno;
429 } 588 }
430 589
431 return is_duplicate; 590 ret = is_duplicate;
432}
433
434/* copy primary address for bonding */
435static void mark_bonding_address(struct orig_node *orig_node,
436 struct orig_node *orig_neigh_node,
437 struct batman_packet *batman_packet)
438
439{
440 if (batman_packet->flags & PRIMARIES_FIRST_HOP)
441 memcpy(orig_neigh_node->primary_addr,
442 orig_node->orig, ETH_ALEN);
443
444 return;
445}
446
447/* mark possible bond.candidates in the neighbor list */
448void update_bonding_candidates(struct orig_node *orig_node)
449{
450 int candidates;
451 int interference_candidate;
452 int best_tq;
453 struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
454 struct neigh_node *first_candidate, *last_candidate;
455
456 /* update the candidates for this originator */
457 if (!orig_node->router) {
458 orig_node->bond.candidates = 0;
459 return;
460 }
461
462 best_tq = orig_node->router->tq_avg;
463
464 /* update bond.candidates */
465
466 candidates = 0;
467
468 /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
469 * as "bonding partner" */
470
471 /* first, zero the list */
472 list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
473 tmp_neigh_node->next_bond_candidate = NULL;
474 }
475
476 first_candidate = NULL;
477 last_candidate = NULL;
478 list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
479
480 /* only consider if it has the same primary address ... */
481 if (memcmp(orig_node->orig,
482 tmp_neigh_node->orig_node->primary_addr,
483 ETH_ALEN) != 0)
484 continue;
485
486 /* ... and is good enough to be considered */
487 if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
488 continue;
489
490 /* check if we have another candidate with the same
491 * mac address or interface. If we do, we won't
492 * select this candidate because of possible interference. */
493
494 interference_candidate = 0;
495 list_for_each_entry(tmp_neigh_node2,
496 &orig_node->neigh_list, list) {
497
498 if (tmp_neigh_node2 == tmp_neigh_node)
499 continue;
500
501 /* we only care if the other candidate is even
502 * considered as candidate. */
503 if (!tmp_neigh_node2->next_bond_candidate)
504 continue;
505
506
507 if ((tmp_neigh_node->if_incoming ==
508 tmp_neigh_node2->if_incoming)
509 || (memcmp(tmp_neigh_node->addr,
510 tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
511
512 interference_candidate = 1;
513 break;
514 }
515 }
516 /* don't care further if it is an interference candidate */
517 if (interference_candidate)
518 continue;
519
520 if (!first_candidate) {
521 first_candidate = tmp_neigh_node;
522 tmp_neigh_node->next_bond_candidate = first_candidate;
523 } else
524 tmp_neigh_node->next_bond_candidate = last_candidate;
525
526 last_candidate = tmp_neigh_node;
527
528 candidates++;
529 }
530
531 if (candidates > 0) {
532 first_candidate->next_bond_candidate = last_candidate;
533 orig_node->bond.selected = first_candidate;
534 }
535 591
536 orig_node->bond.candidates = candidates; 592out:
593 spin_unlock_bh(&orig_node->ogm_cnt_lock);
594 orig_node_free_ref(orig_node);
595 return ret;
537} 596}
538 597
539void receive_bat_packet(struct ethhdr *ethhdr, 598void receive_bat_packet(struct ethhdr *ethhdr,
540 struct batman_packet *batman_packet, 599 struct batman_packet *batman_packet,
541 unsigned char *hna_buff, int hna_buff_len, 600 unsigned char *hna_buff, int hna_buff_len,
542 struct batman_if *if_incoming) 601 struct hard_iface *if_incoming)
543{ 602{
544 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 603 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
545 struct batman_if *batman_if; 604 struct hard_iface *hard_iface;
546 struct orig_node *orig_neigh_node, *orig_node; 605 struct orig_node *orig_neigh_node, *orig_node;
547 char has_directlink_flag; 606 char has_directlink_flag;
548 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 607 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
@@ -570,8 +629,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
570 629
571 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0); 630 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
572 631
573 is_single_hop_neigh = (compare_orig(ethhdr->h_source, 632 is_single_hop_neigh = (compare_eth(ethhdr->h_source,
574 batman_packet->orig) ? 1 : 0); 633 batman_packet->orig) ? 1 : 0);
575 634
576 bat_dbg(DBG_BATMAN, bat_priv, 635 bat_dbg(DBG_BATMAN, bat_priv,
577 "Received BATMAN packet via NB: %pM, IF: %s [%pM] " 636 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
@@ -584,26 +643,26 @@ void receive_bat_packet(struct ethhdr *ethhdr,
584 has_directlink_flag); 643 has_directlink_flag);
585 644
586 rcu_read_lock(); 645 rcu_read_lock();
587 list_for_each_entry_rcu(batman_if, &if_list, list) { 646 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
588 if (batman_if->if_status != IF_ACTIVE) 647 if (hard_iface->if_status != IF_ACTIVE)
589 continue; 648 continue;
590 649
591 if (batman_if->soft_iface != if_incoming->soft_iface) 650 if (hard_iface->soft_iface != if_incoming->soft_iface)
592 continue; 651 continue;
593 652
594 if (compare_orig(ethhdr->h_source, 653 if (compare_eth(ethhdr->h_source,
595 batman_if->net_dev->dev_addr)) 654 hard_iface->net_dev->dev_addr))
596 is_my_addr = 1; 655 is_my_addr = 1;
597 656
598 if (compare_orig(batman_packet->orig, 657 if (compare_eth(batman_packet->orig,
599 batman_if->net_dev->dev_addr)) 658 hard_iface->net_dev->dev_addr))
600 is_my_orig = 1; 659 is_my_orig = 1;
601 660
602 if (compare_orig(batman_packet->prev_sender, 661 if (compare_eth(batman_packet->prev_sender,
603 batman_if->net_dev->dev_addr)) 662 hard_iface->net_dev->dev_addr))
604 is_my_oldorig = 1; 663 is_my_oldorig = 1;
605 664
606 if (compare_orig(ethhdr->h_source, broadcast_addr)) 665 if (compare_eth(ethhdr->h_source, broadcast_addr))
607 is_broadcast = 1; 666 is_broadcast = 1;
608 } 667 }
609 rcu_read_unlock(); 668 rcu_read_unlock();
@@ -635,7 +694,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
635 int offset; 694 int offset;
636 695
637 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); 696 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
638
639 if (!orig_neigh_node) 697 if (!orig_neigh_node)
640 return; 698 return;
641 699
@@ -644,18 +702,22 @@ void receive_bat_packet(struct ethhdr *ethhdr,
644 /* if received seqno equals last send seqno save new 702 /* if received seqno equals last send seqno save new
645 * seqno for bidirectional check */ 703 * seqno for bidirectional check */
646 if (has_directlink_flag && 704 if (has_directlink_flag &&
647 compare_orig(if_incoming->net_dev->dev_addr, 705 compare_eth(if_incoming->net_dev->dev_addr,
648 batman_packet->orig) && 706 batman_packet->orig) &&
649 (batman_packet->seqno - if_incoming_seqno + 2 == 0)) { 707 (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
650 offset = if_incoming->if_num * NUM_WORDS; 708 offset = if_incoming->if_num * NUM_WORDS;
709
710 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
651 word = &(orig_neigh_node->bcast_own[offset]); 711 word = &(orig_neigh_node->bcast_own[offset]);
652 bit_mark(word, 0); 712 bit_mark(word, 0);
653 orig_neigh_node->bcast_own_sum[if_incoming->if_num] = 713 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
654 bit_packet_count(word); 714 bit_packet_count(word);
715 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
655 } 716 }
656 717
657 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " 718 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
658 "originator packet from myself (via neighbor)\n"); 719 "originator packet from myself (via neighbor)\n");
720 orig_node_free_ref(orig_neigh_node);
659 return; 721 return;
660 } 722 }
661 723
@@ -676,27 +738,27 @@ void receive_bat_packet(struct ethhdr *ethhdr,
676 bat_dbg(DBG_BATMAN, bat_priv, 738 bat_dbg(DBG_BATMAN, bat_priv,
677 "Drop packet: packet within seqno protection time " 739 "Drop packet: packet within seqno protection time "
678 "(sender: %pM)\n", ethhdr->h_source); 740 "(sender: %pM)\n", ethhdr->h_source);
679 return; 741 goto out;
680 } 742 }
681 743
682 if (batman_packet->tq == 0) { 744 if (batman_packet->tq == 0) {
683 bat_dbg(DBG_BATMAN, bat_priv, 745 bat_dbg(DBG_BATMAN, bat_priv,
684 "Drop packet: originator packet with tq equal 0\n"); 746 "Drop packet: originator packet with tq equal 0\n");
685 return; 747 goto out;
686 } 748 }
687 749
688 /* avoid temporary routing loops */ 750 /* avoid temporary routing loops */
689 if ((orig_node->router) && 751 if ((orig_node->router) &&
690 (orig_node->router->orig_node->router) && 752 (orig_node->router->orig_node->router) &&
691 (compare_orig(orig_node->router->addr, 753 (compare_eth(orig_node->router->addr,
692 batman_packet->prev_sender)) && 754 batman_packet->prev_sender)) &&
693 !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) && 755 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
694 (compare_orig(orig_node->router->addr, 756 (compare_eth(orig_node->router->addr,
695 orig_node->router->orig_node->router->addr))) { 757 orig_node->router->orig_node->router->addr))) {
696 bat_dbg(DBG_BATMAN, bat_priv, 758 bat_dbg(DBG_BATMAN, bat_priv,
697 "Drop packet: ignoring all rebroadcast packets that " 759 "Drop packet: ignoring all rebroadcast packets that "
698 "may make me loop (sender: %pM)\n", ethhdr->h_source); 760 "may make me loop (sender: %pM)\n", ethhdr->h_source);
699 return; 761 goto out;
700 } 762 }
701 763
702 /* if sender is a direct neighbor the sender mac equals 764 /* if sender is a direct neighbor the sender mac equals
@@ -705,19 +767,21 @@ void receive_bat_packet(struct ethhdr *ethhdr,
705 orig_node : 767 orig_node :
706 get_orig_node(bat_priv, ethhdr->h_source)); 768 get_orig_node(bat_priv, ethhdr->h_source));
707 if (!orig_neigh_node) 769 if (!orig_neigh_node)
708 return; 770 goto out;
709 771
710 /* drop packet if sender is not a direct neighbor and if we 772 /* drop packet if sender is not a direct neighbor and if we
711 * don't route towards it */ 773 * don't route towards it */
712 if (!is_single_hop_neigh && (!orig_neigh_node->router)) { 774 if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
713 bat_dbg(DBG_BATMAN, bat_priv, 775 bat_dbg(DBG_BATMAN, bat_priv,
714 "Drop packet: OGM via unknown neighbor!\n"); 776 "Drop packet: OGM via unknown neighbor!\n");
715 return; 777 goto out_neigh;
716 } 778 }
717 779
718 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node, 780 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
719 batman_packet, if_incoming); 781 batman_packet, if_incoming);
720 782
783 bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
784
721 /* update ranking if it is not a duplicate or has the same 785 /* update ranking if it is not a duplicate or has the same
722 * seqno and similar ttl as the non-duplicate */ 786 * seqno and similar ttl as the non-duplicate */
723 if (is_bidirectional && 787 if (is_bidirectional &&
@@ -727,9 +791,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
727 update_orig(bat_priv, orig_node, ethhdr, batman_packet, 791 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
728 if_incoming, hna_buff, hna_buff_len, is_duplicate); 792 if_incoming, hna_buff, hna_buff_len, is_duplicate);
729 793
730 mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
731 update_bonding_candidates(orig_node);
732
733 /* is single hop (direct) neighbor */ 794 /* is single hop (direct) neighbor */
734 if (is_single_hop_neigh) { 795 if (is_single_hop_neigh) {
735 796
@@ -739,31 +800,36 @@ void receive_bat_packet(struct ethhdr *ethhdr,
739 800
740 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " 801 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
741 "rebroadcast neighbor packet with direct link flag\n"); 802 "rebroadcast neighbor packet with direct link flag\n");
742 return; 803 goto out_neigh;
743 } 804 }
744 805
745 /* multihop originator */ 806 /* multihop originator */
746 if (!is_bidirectional) { 807 if (!is_bidirectional) {
747 bat_dbg(DBG_BATMAN, bat_priv, 808 bat_dbg(DBG_BATMAN, bat_priv,
748 "Drop packet: not received via bidirectional link\n"); 809 "Drop packet: not received via bidirectional link\n");
749 return; 810 goto out_neigh;
750 } 811 }
751 812
752 if (is_duplicate) { 813 if (is_duplicate) {
753 bat_dbg(DBG_BATMAN, bat_priv, 814 bat_dbg(DBG_BATMAN, bat_priv,
754 "Drop packet: duplicate packet received\n"); 815 "Drop packet: duplicate packet received\n");
755 return; 816 goto out_neigh;
756 } 817 }
757 818
758 bat_dbg(DBG_BATMAN, bat_priv, 819 bat_dbg(DBG_BATMAN, bat_priv,
759 "Forwarding packet: rebroadcast originator packet\n"); 820 "Forwarding packet: rebroadcast originator packet\n");
760 schedule_forward_packet(orig_node, ethhdr, batman_packet, 821 schedule_forward_packet(orig_node, ethhdr, batman_packet,
761 0, hna_buff_len, if_incoming); 822 0, hna_buff_len, if_incoming);
823
824out_neigh:
825 if ((orig_neigh_node) && (!is_single_hop_neigh))
826 orig_node_free_ref(orig_neigh_node);
827out:
828 orig_node_free_ref(orig_node);
762} 829}
763 830
764int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) 831int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
765{ 832{
766 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
767 struct ethhdr *ethhdr; 833 struct ethhdr *ethhdr;
768 834
769 /* drop packet if it has not necessary minimum size */ 835 /* drop packet if it has not necessary minimum size */
@@ -790,12 +856,10 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
790 856
791 ethhdr = (struct ethhdr *)skb_mac_header(skb); 857 ethhdr = (struct ethhdr *)skb_mac_header(skb);
792 858
793 spin_lock_bh(&bat_priv->orig_hash_lock);
794 receive_aggr_bat_packet(ethhdr, 859 receive_aggr_bat_packet(ethhdr,
795 skb->data, 860 skb->data,
796 skb_headlen(skb), 861 skb_headlen(skb),
797 batman_if); 862 hard_iface);
798 spin_unlock_bh(&bat_priv->orig_hash_lock);
799 863
800 kfree_skb(skb); 864 kfree_skb(skb);
801 return NET_RX_SUCCESS; 865 return NET_RX_SUCCESS;
@@ -804,68 +868,75 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
804static int recv_my_icmp_packet(struct bat_priv *bat_priv, 868static int recv_my_icmp_packet(struct bat_priv *bat_priv,
805 struct sk_buff *skb, size_t icmp_len) 869 struct sk_buff *skb, size_t icmp_len)
806{ 870{
807 struct orig_node *orig_node; 871 struct orig_node *orig_node = NULL;
872 struct neigh_node *neigh_node = NULL;
808 struct icmp_packet_rr *icmp_packet; 873 struct icmp_packet_rr *icmp_packet;
809 struct batman_if *batman_if; 874 int ret = NET_RX_DROP;
810 int ret;
811 uint8_t dstaddr[ETH_ALEN];
812 875
813 icmp_packet = (struct icmp_packet_rr *)skb->data; 876 icmp_packet = (struct icmp_packet_rr *)skb->data;
814 877
815 /* add data to device queue */ 878 /* add data to device queue */
816 if (icmp_packet->msg_type != ECHO_REQUEST) { 879 if (icmp_packet->msg_type != ECHO_REQUEST) {
817 bat_socket_receive_packet(icmp_packet, icmp_len); 880 bat_socket_receive_packet(icmp_packet, icmp_len);
818 return NET_RX_DROP; 881 goto out;
819 } 882 }
820 883
821 if (!bat_priv->primary_if) 884 if (!bat_priv->primary_if)
822 return NET_RX_DROP; 885 goto out;
823 886
824 /* answer echo request (ping) */ 887 /* answer echo request (ping) */
825 /* get routing information */ 888 /* get routing information */
826 spin_lock_bh(&bat_priv->orig_hash_lock); 889 rcu_read_lock();
827 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, 890 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
828 compare_orig, choose_orig, 891
829 icmp_packet->orig)); 892 if (!orig_node)
830 ret = NET_RX_DROP; 893 goto unlock;
831
832 if ((orig_node) && (orig_node->router)) {
833
834 /* don't lock while sending the packets ... we therefore
835 * copy the required data before sending */
836 batman_if = orig_node->router->if_incoming;
837 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
838 spin_unlock_bh(&bat_priv->orig_hash_lock);
839
840 /* create a copy of the skb, if needed, to modify it. */
841 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
842 return NET_RX_DROP;
843 894
844 icmp_packet = (struct icmp_packet_rr *)skb->data; 895 neigh_node = orig_node->router;
845 896
846 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 897 if (!neigh_node)
847 memcpy(icmp_packet->orig, 898 goto unlock;
848 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
849 icmp_packet->msg_type = ECHO_REPLY;
850 icmp_packet->ttl = TTL;
851 899
852 send_skb_packet(skb, batman_if, dstaddr); 900 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
853 ret = NET_RX_SUCCESS; 901 neigh_node = NULL;
902 goto unlock;
903 }
854 904
855 } else 905 rcu_read_unlock();
856 spin_unlock_bh(&bat_priv->orig_hash_lock); 906
907 /* create a copy of the skb, if needed, to modify it. */
908 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
909 goto out;
910
911 icmp_packet = (struct icmp_packet_rr *)skb->data;
857 912
913 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
914 memcpy(icmp_packet->orig,
915 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
916 icmp_packet->msg_type = ECHO_REPLY;
917 icmp_packet->ttl = TTL;
918
919 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
920 ret = NET_RX_SUCCESS;
921 goto out;
922
923unlock:
924 rcu_read_unlock();
925out:
926 if (neigh_node)
927 neigh_node_free_ref(neigh_node);
928 if (orig_node)
929 orig_node_free_ref(orig_node);
858 return ret; 930 return ret;
859} 931}
860 932
861static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, 933static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
862 struct sk_buff *skb) 934 struct sk_buff *skb)
863{ 935{
864 struct orig_node *orig_node; 936 struct orig_node *orig_node = NULL;
937 struct neigh_node *neigh_node = NULL;
865 struct icmp_packet *icmp_packet; 938 struct icmp_packet *icmp_packet;
866 struct batman_if *batman_if; 939 int ret = NET_RX_DROP;
867 int ret;
868 uint8_t dstaddr[ETH_ALEN];
869 940
870 icmp_packet = (struct icmp_packet *)skb->data; 941 icmp_packet = (struct icmp_packet *)skb->data;
871 942
@@ -874,59 +945,67 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
874 pr_debug("Warning - can't forward icmp packet from %pM to " 945 pr_debug("Warning - can't forward icmp packet from %pM to "
875 "%pM: ttl exceeded\n", icmp_packet->orig, 946 "%pM: ttl exceeded\n", icmp_packet->orig,
876 icmp_packet->dst); 947 icmp_packet->dst);
877 return NET_RX_DROP; 948 goto out;
878 } 949 }
879 950
880 if (!bat_priv->primary_if) 951 if (!bat_priv->primary_if)
881 return NET_RX_DROP; 952 goto out;
882 953
883 /* get routing information */ 954 /* get routing information */
884 spin_lock_bh(&bat_priv->orig_hash_lock); 955 rcu_read_lock();
885 orig_node = ((struct orig_node *) 956 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
886 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
887 icmp_packet->orig));
888 ret = NET_RX_DROP;
889
890 if ((orig_node) && (orig_node->router)) {
891
892 /* don't lock while sending the packets ... we therefore
893 * copy the required data before sending */
894 batman_if = orig_node->router->if_incoming;
895 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
896 spin_unlock_bh(&bat_priv->orig_hash_lock);
897
898 /* create a copy of the skb, if needed, to modify it. */
899 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
900 return NET_RX_DROP;
901 957
902 icmp_packet = (struct icmp_packet *) skb->data; 958 if (!orig_node)
959 goto unlock;
903 960
904 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 961 neigh_node = orig_node->router;
905 memcpy(icmp_packet->orig,
906 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
907 icmp_packet->msg_type = TTL_EXCEEDED;
908 icmp_packet->ttl = TTL;
909 962
910 send_skb_packet(skb, batman_if, dstaddr); 963 if (!neigh_node)
911 ret = NET_RX_SUCCESS; 964 goto unlock;
912 965
913 } else 966 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
914 spin_unlock_bh(&bat_priv->orig_hash_lock); 967 neigh_node = NULL;
968 goto unlock;
969 }
970
971 rcu_read_unlock();
972
973 /* create a copy of the skb, if needed, to modify it. */
974 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
975 goto out;
915 976
977 icmp_packet = (struct icmp_packet *)skb->data;
978
979 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
980 memcpy(icmp_packet->orig,
981 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
982 icmp_packet->msg_type = TTL_EXCEEDED;
983 icmp_packet->ttl = TTL;
984
985 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
986 ret = NET_RX_SUCCESS;
987 goto out;
988
989unlock:
990 rcu_read_unlock();
991out:
992 if (neigh_node)
993 neigh_node_free_ref(neigh_node);
994 if (orig_node)
995 orig_node_free_ref(orig_node);
916 return ret; 996 return ret;
917} 997}
918 998
919 999
920int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) 1000int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
921{ 1001{
922 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1002 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
923 struct icmp_packet_rr *icmp_packet; 1003 struct icmp_packet_rr *icmp_packet;
924 struct ethhdr *ethhdr; 1004 struct ethhdr *ethhdr;
925 struct orig_node *orig_node; 1005 struct orig_node *orig_node = NULL;
926 struct batman_if *batman_if; 1006 struct neigh_node *neigh_node = NULL;
927 int hdr_size = sizeof(struct icmp_packet); 1007 int hdr_size = sizeof(struct icmp_packet);
928 int ret; 1008 int ret = NET_RX_DROP;
929 uint8_t dstaddr[ETH_ALEN];
930 1009
931 /** 1010 /**
932 * we truncate all incoming icmp packets if they don't match our size 1011 * we truncate all incoming icmp packets if they don't match our size
@@ -936,21 +1015,21 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
936 1015
937 /* drop packet if it has not necessary minimum size */ 1016 /* drop packet if it has not necessary minimum size */
938 if (unlikely(!pskb_may_pull(skb, hdr_size))) 1017 if (unlikely(!pskb_may_pull(skb, hdr_size)))
939 return NET_RX_DROP; 1018 goto out;
940 1019
941 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1020 ethhdr = (struct ethhdr *)skb_mac_header(skb);
942 1021
943 /* packet with unicast indication but broadcast recipient */ 1022 /* packet with unicast indication but broadcast recipient */
944 if (is_broadcast_ether_addr(ethhdr->h_dest)) 1023 if (is_broadcast_ether_addr(ethhdr->h_dest))
945 return NET_RX_DROP; 1024 goto out;
946 1025
947 /* packet with broadcast sender address */ 1026 /* packet with broadcast sender address */
948 if (is_broadcast_ether_addr(ethhdr->h_source)) 1027 if (is_broadcast_ether_addr(ethhdr->h_source))
949 return NET_RX_DROP; 1028 goto out;
950 1029
951 /* not for me */ 1030 /* not for me */
952 if (!is_my_mac(ethhdr->h_dest)) 1031 if (!is_my_mac(ethhdr->h_dest))
953 return NET_RX_DROP; 1032 goto out;
954 1033
955 icmp_packet = (struct icmp_packet_rr *)skb->data; 1034 icmp_packet = (struct icmp_packet_rr *)skb->data;
956 1035
@@ -970,50 +1049,59 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
970 if (icmp_packet->ttl < 2) 1049 if (icmp_packet->ttl < 2)
971 return recv_icmp_ttl_exceeded(bat_priv, skb); 1050 return recv_icmp_ttl_exceeded(bat_priv, skb);
972 1051
973 ret = NET_RX_DROP;
974
975 /* get routing information */ 1052 /* get routing information */
976 spin_lock_bh(&bat_priv->orig_hash_lock); 1053 rcu_read_lock();
977 orig_node = ((struct orig_node *) 1054 orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
978 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
979 icmp_packet->dst));
980 1055
981 if ((orig_node) && (orig_node->router)) { 1056 if (!orig_node)
1057 goto unlock;
982 1058
983 /* don't lock while sending the packets ... we therefore 1059 neigh_node = orig_node->router;
984 * copy the required data before sending */
985 batman_if = orig_node->router->if_incoming;
986 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
987 spin_unlock_bh(&bat_priv->orig_hash_lock);
988 1060
989 /* create a copy of the skb, if needed, to modify it. */ 1061 if (!neigh_node)
990 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 1062 goto unlock;
991 return NET_RX_DROP;
992 1063
993 icmp_packet = (struct icmp_packet_rr *)skb->data; 1064 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
1065 neigh_node = NULL;
1066 goto unlock;
1067 }
994 1068
995 /* decrement ttl */ 1069 rcu_read_unlock();
996 icmp_packet->ttl--;
997 1070
998 /* route it */ 1071 /* create a copy of the skb, if needed, to modify it. */
999 send_skb_packet(skb, batman_if, dstaddr); 1072 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
1000 ret = NET_RX_SUCCESS; 1073 goto out;
1001 1074
1002 } else 1075 icmp_packet = (struct icmp_packet_rr *)skb->data;
1003 spin_unlock_bh(&bat_priv->orig_hash_lock); 1076
1077 /* decrement ttl */
1078 icmp_packet->ttl--;
1079
1080 /* route it */
1081 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1082 ret = NET_RX_SUCCESS;
1083 goto out;
1004 1084
1085unlock:
1086 rcu_read_unlock();
1087out:
1088 if (neigh_node)
1089 neigh_node_free_ref(neigh_node);
1090 if (orig_node)
1091 orig_node_free_ref(orig_node);
1005 return ret; 1092 return ret;
1006} 1093}
1007 1094
1008/* find a suitable router for this originator, and use 1095/* find a suitable router for this originator, and use
1009 * bonding if possible. */ 1096 * bonding if possible. increases the found neighbors
1097 * refcount.*/
1010struct neigh_node *find_router(struct bat_priv *bat_priv, 1098struct neigh_node *find_router(struct bat_priv *bat_priv,
1011 struct orig_node *orig_node, 1099 struct orig_node *orig_node,
1012 struct batman_if *recv_if) 1100 struct hard_iface *recv_if)
1013{ 1101{
1014 struct orig_node *primary_orig_node; 1102 struct orig_node *primary_orig_node;
1015 struct orig_node *router_orig; 1103 struct orig_node *router_orig;
1016 struct neigh_node *router, *first_candidate, *best_router; 1104 struct neigh_node *router, *first_candidate, *tmp_neigh_node;
1017 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 1105 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
1018 int bonding_enabled; 1106 int bonding_enabled;
1019 1107
@@ -1025,78 +1113,128 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1025 1113
1026 /* without bonding, the first node should 1114 /* without bonding, the first node should
1027 * always choose the default router. */ 1115 * always choose the default router. */
1028
1029 bonding_enabled = atomic_read(&bat_priv->bonding); 1116 bonding_enabled = atomic_read(&bat_priv->bonding);
1030 1117
1031 if ((!recv_if) && (!bonding_enabled)) 1118 rcu_read_lock();
1032 return orig_node->router; 1119 /* select default router to output */
1033 1120 router = orig_node->router;
1034 router_orig = orig_node->router->orig_node; 1121 router_orig = orig_node->router->orig_node;
1122 if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
1123 rcu_read_unlock();
1124 return NULL;
1125 }
1126
1127 if ((!recv_if) && (!bonding_enabled))
1128 goto return_router;
1035 1129
1036 /* if we have something in the primary_addr, we can search 1130 /* if we have something in the primary_addr, we can search
1037 * for a potential bonding candidate. */ 1131 * for a potential bonding candidate. */
1038 if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0) 1132 if (compare_eth(router_orig->primary_addr, zero_mac))
1039 return orig_node->router; 1133 goto return_router;
1040 1134
1041 /* find the orig_node which has the primary interface. might 1135 /* find the orig_node which has the primary interface. might
1042 * even be the same as our router_orig in many cases */ 1136 * even be the same as our router_orig in many cases */
1043 1137
1044 if (memcmp(router_orig->primary_addr, 1138 if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
1045 router_orig->orig, ETH_ALEN) == 0) {
1046 primary_orig_node = router_orig; 1139 primary_orig_node = router_orig;
1047 } else { 1140 } else {
1048 primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig, 1141 primary_orig_node = orig_hash_find(bat_priv,
1049 choose_orig, 1142 router_orig->primary_addr);
1050 router_orig->primary_addr);
1051
1052 if (!primary_orig_node) 1143 if (!primary_orig_node)
1053 return orig_node->router; 1144 goto return_router;
1145
1146 orig_node_free_ref(primary_orig_node);
1054 } 1147 }
1055 1148
1056 /* with less than 2 candidates, we can't do any 1149 /* with less than 2 candidates, we can't do any
1057 * bonding and prefer the original router. */ 1150 * bonding and prefer the original router. */
1058 1151 if (atomic_read(&primary_orig_node->bond_candidates) < 2)
1059 if (primary_orig_node->bond.candidates < 2) 1152 goto return_router;
1060 return orig_node->router;
1061 1153
1062 1154
1063 /* all nodes between should choose a candidate which 1155 /* all nodes between should choose a candidate which
1064 * is is not on the interface where the packet came 1156 * is is not on the interface where the packet came
1065 * in. */ 1157 * in. */
1066 first_candidate = primary_orig_node->bond.selected; 1158
1067 router = first_candidate; 1159 neigh_node_free_ref(router);
1160 first_candidate = NULL;
1161 router = NULL;
1068 1162
1069 if (bonding_enabled) { 1163 if (bonding_enabled) {
1070 /* in the bonding case, send the packets in a round 1164 /* in the bonding case, send the packets in a round
1071 * robin fashion over the remaining interfaces. */ 1165 * robin fashion over the remaining interfaces. */
1072 do { 1166
1167 list_for_each_entry_rcu(tmp_neigh_node,
1168 &primary_orig_node->bond_list, bonding_list) {
1169 if (!first_candidate)
1170 first_candidate = tmp_neigh_node;
1073 /* recv_if == NULL on the first node. */ 1171 /* recv_if == NULL on the first node. */
1074 if (router->if_incoming != recv_if) 1172 if (tmp_neigh_node->if_incoming != recv_if &&
1173 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
1174 router = tmp_neigh_node;
1075 break; 1175 break;
1176 }
1177 }
1076 1178
1077 router = router->next_bond_candidate; 1179 /* use the first candidate if nothing was found. */
1078 } while (router != first_candidate); 1180 if (!router && first_candidate &&
1181 atomic_inc_not_zero(&first_candidate->refcount))
1182 router = first_candidate;
1079 1183
1080 primary_orig_node->bond.selected = router->next_bond_candidate; 1184 if (!router) {
1185 rcu_read_unlock();
1186 return NULL;
1187 }
1188
1189 /* selected should point to the next element
1190 * after the current router */
1191 spin_lock_bh(&primary_orig_node->neigh_list_lock);
1192 /* this is a list_move(), which unfortunately
1193 * does not exist as rcu version */
1194 list_del_rcu(&primary_orig_node->bond_list);
1195 list_add_rcu(&primary_orig_node->bond_list,
1196 &router->bonding_list);
1197 spin_unlock_bh(&primary_orig_node->neigh_list_lock);
1081 1198
1082 } else { 1199 } else {
1083 /* if bonding is disabled, use the best of the 1200 /* if bonding is disabled, use the best of the
1084 * remaining candidates which are not using 1201 * remaining candidates which are not using
1085 * this interface. */ 1202 * this interface. */
1086 best_router = first_candidate; 1203 list_for_each_entry_rcu(tmp_neigh_node,
1204 &primary_orig_node->bond_list, bonding_list) {
1205 if (!first_candidate)
1206 first_candidate = tmp_neigh_node;
1087 1207
1088 do {
1089 /* recv_if == NULL on the first node. */ 1208 /* recv_if == NULL on the first node. */
1090 if ((router->if_incoming != recv_if) && 1209 if (tmp_neigh_node->if_incoming == recv_if)
1091 (router->tq_avg > best_router->tq_avg)) 1210 continue;
1092 best_router = router;
1093 1211
1094 router = router->next_bond_candidate; 1212 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1095 } while (router != first_candidate); 1213 continue;
1096 1214
1097 router = best_router; 1215 /* if we don't have a router yet
1098 } 1216 * or this one is better, choose it. */
1217 if ((!router) ||
1218 (tmp_neigh_node->tq_avg > router->tq_avg)) {
1219 /* decrement refcount of
1220 * previously selected router */
1221 if (router)
1222 neigh_node_free_ref(router);
1223
1224 router = tmp_neigh_node;
1225 atomic_inc_not_zero(&router->refcount);
1226 }
1227
1228 neigh_node_free_ref(tmp_neigh_node);
1229 }
1099 1230
1231 /* use the first candidate if nothing was found. */
1232 if (!router && first_candidate &&
1233 atomic_inc_not_zero(&first_candidate->refcount))
1234 router = first_candidate;
1235 }
1236return_router:
1237 rcu_read_unlock();
1100 return router; 1238 return router;
1101} 1239}
1102 1240
@@ -1125,17 +1263,14 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
1125 return 0; 1263 return 0;
1126} 1264}
1127 1265
1128int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, 1266int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1129 int hdr_size)
1130{ 1267{
1131 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1268 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1132 struct orig_node *orig_node; 1269 struct orig_node *orig_node = NULL;
1133 struct neigh_node *router; 1270 struct neigh_node *neigh_node = NULL;
1134 struct batman_if *batman_if;
1135 uint8_t dstaddr[ETH_ALEN];
1136 struct unicast_packet *unicast_packet; 1271 struct unicast_packet *unicast_packet;
1137 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); 1272 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
1138 int ret; 1273 int ret = NET_RX_DROP;
1139 struct sk_buff *new_skb; 1274 struct sk_buff *new_skb;
1140 1275
1141 unicast_packet = (struct unicast_packet *)skb->data; 1276 unicast_packet = (struct unicast_packet *)skb->data;
@@ -1145,53 +1280,51 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
1145 pr_debug("Warning - can't forward unicast packet from %pM to " 1280 pr_debug("Warning - can't forward unicast packet from %pM to "
1146 "%pM: ttl exceeded\n", ethhdr->h_source, 1281 "%pM: ttl exceeded\n", ethhdr->h_source,
1147 unicast_packet->dest); 1282 unicast_packet->dest);
1148 return NET_RX_DROP; 1283 goto out;
1149 } 1284 }
1150 1285
1151 /* get routing information */ 1286 /* get routing information */
1152 spin_lock_bh(&bat_priv->orig_hash_lock); 1287 rcu_read_lock();
1153 orig_node = ((struct orig_node *) 1288 orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
1154 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1155 unicast_packet->dest));
1156
1157 router = find_router(bat_priv, orig_node, recv_if);
1158 1289
1159 if (!router) { 1290 if (!orig_node)
1160 spin_unlock_bh(&bat_priv->orig_hash_lock); 1291 goto unlock;
1161 return NET_RX_DROP;
1162 }
1163 1292
1164 /* don't lock while sending the packets ... we therefore 1293 rcu_read_unlock();
1165 * copy the required data before sending */
1166 1294
1167 batman_if = router->if_incoming; 1295 /* find_router() increases neigh_nodes refcount if found. */
1168 memcpy(dstaddr, router->addr, ETH_ALEN); 1296 neigh_node = find_router(bat_priv, orig_node, recv_if);
1169 1297
1170 spin_unlock_bh(&bat_priv->orig_hash_lock); 1298 if (!neigh_node)
1299 goto out;
1171 1300
1172 /* create a copy of the skb, if needed, to modify it. */ 1301 /* create a copy of the skb, if needed, to modify it. */
1173 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 1302 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
1174 return NET_RX_DROP; 1303 goto out;
1175 1304
1176 unicast_packet = (struct unicast_packet *)skb->data; 1305 unicast_packet = (struct unicast_packet *)skb->data;
1177 1306
1178 if (unicast_packet->packet_type == BAT_UNICAST && 1307 if (unicast_packet->packet_type == BAT_UNICAST &&
1179 atomic_read(&bat_priv->fragmentation) && 1308 atomic_read(&bat_priv->fragmentation) &&
1180 skb->len > batman_if->net_dev->mtu) 1309 skb->len > neigh_node->if_incoming->net_dev->mtu) {
1181 return frag_send_skb(skb, bat_priv, batman_if, 1310 ret = frag_send_skb(skb, bat_priv,
1182 dstaddr); 1311 neigh_node->if_incoming, neigh_node->addr);
1312 goto out;
1313 }
1183 1314
1184 if (unicast_packet->packet_type == BAT_UNICAST_FRAG && 1315 if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
1185 frag_can_reassemble(skb, batman_if->net_dev->mtu)) { 1316 frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
1186 1317
1187 ret = frag_reassemble_skb(skb, bat_priv, &new_skb); 1318 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1188 1319
1189 if (ret == NET_RX_DROP) 1320 if (ret == NET_RX_DROP)
1190 return NET_RX_DROP; 1321 goto out;
1191 1322
1192 /* packet was buffered for late merge */ 1323 /* packet was buffered for late merge */
1193 if (!new_skb) 1324 if (!new_skb) {
1194 return NET_RX_SUCCESS; 1325 ret = NET_RX_SUCCESS;
1326 goto out;
1327 }
1195 1328
1196 skb = new_skb; 1329 skb = new_skb;
1197 unicast_packet = (struct unicast_packet *)skb->data; 1330 unicast_packet = (struct unicast_packet *)skb->data;
@@ -1201,12 +1334,21 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
1201 unicast_packet->ttl--; 1334 unicast_packet->ttl--;
1202 1335
1203 /* route it */ 1336 /* route it */
1204 send_skb_packet(skb, batman_if, dstaddr); 1337 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1338 ret = NET_RX_SUCCESS;
1339 goto out;
1205 1340
1206 return NET_RX_SUCCESS; 1341unlock:
1342 rcu_read_unlock();
1343out:
1344 if (neigh_node)
1345 neigh_node_free_ref(neigh_node);
1346 if (orig_node)
1347 orig_node_free_ref(orig_node);
1348 return ret;
1207} 1349}
1208 1350
1209int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) 1351int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1210{ 1352{
1211 struct unicast_packet *unicast_packet; 1353 struct unicast_packet *unicast_packet;
1212 int hdr_size = sizeof(struct unicast_packet); 1354 int hdr_size = sizeof(struct unicast_packet);
@@ -1222,10 +1364,10 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1222 return NET_RX_SUCCESS; 1364 return NET_RX_SUCCESS;
1223 } 1365 }
1224 1366
1225 return route_unicast_packet(skb, recv_if, hdr_size); 1367 return route_unicast_packet(skb, recv_if);
1226} 1368}
1227 1369
1228int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) 1370int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1229{ 1371{
1230 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1372 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1231 struct unicast_frag_packet *unicast_packet; 1373 struct unicast_frag_packet *unicast_packet;
@@ -1255,89 +1397,96 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
1255 return NET_RX_SUCCESS; 1397 return NET_RX_SUCCESS;
1256 } 1398 }
1257 1399
1258 return route_unicast_packet(skb, recv_if, hdr_size); 1400 return route_unicast_packet(skb, recv_if);
1259} 1401}
1260 1402
1261 1403
1262int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) 1404int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1263{ 1405{
1264 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1406 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1265 struct orig_node *orig_node; 1407 struct orig_node *orig_node = NULL;
1266 struct bcast_packet *bcast_packet; 1408 struct bcast_packet *bcast_packet;
1267 struct ethhdr *ethhdr; 1409 struct ethhdr *ethhdr;
1268 int hdr_size = sizeof(struct bcast_packet); 1410 int hdr_size = sizeof(struct bcast_packet);
1411 int ret = NET_RX_DROP;
1269 int32_t seq_diff; 1412 int32_t seq_diff;
1270 1413
1271 /* drop packet if it has not necessary minimum size */ 1414 /* drop packet if it has not necessary minimum size */
1272 if (unlikely(!pskb_may_pull(skb, hdr_size))) 1415 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1273 return NET_RX_DROP; 1416 goto out;
1274 1417
1275 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1418 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1276 1419
1277 /* packet with broadcast indication but unicast recipient */ 1420 /* packet with broadcast indication but unicast recipient */
1278 if (!is_broadcast_ether_addr(ethhdr->h_dest)) 1421 if (!is_broadcast_ether_addr(ethhdr->h_dest))
1279 return NET_RX_DROP; 1422 goto out;
1280 1423
1281 /* packet with broadcast sender address */ 1424 /* packet with broadcast sender address */
1282 if (is_broadcast_ether_addr(ethhdr->h_source)) 1425 if (is_broadcast_ether_addr(ethhdr->h_source))
1283 return NET_RX_DROP; 1426 goto out;
1284 1427
1285 /* ignore broadcasts sent by myself */ 1428 /* ignore broadcasts sent by myself */
1286 if (is_my_mac(ethhdr->h_source)) 1429 if (is_my_mac(ethhdr->h_source))
1287 return NET_RX_DROP; 1430 goto out;
1288 1431
1289 bcast_packet = (struct bcast_packet *)skb->data; 1432 bcast_packet = (struct bcast_packet *)skb->data;
1290 1433
1291 /* ignore broadcasts originated by myself */ 1434 /* ignore broadcasts originated by myself */
1292 if (is_my_mac(bcast_packet->orig)) 1435 if (is_my_mac(bcast_packet->orig))
1293 return NET_RX_DROP; 1436 goto out;
1294 1437
1295 if (bcast_packet->ttl < 2) 1438 if (bcast_packet->ttl < 2)
1296 return NET_RX_DROP; 1439 goto out;
1297 1440
1298 spin_lock_bh(&bat_priv->orig_hash_lock); 1441 rcu_read_lock();
1299 orig_node = ((struct orig_node *) 1442 orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
1300 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1301 bcast_packet->orig));
1302 1443
1303 if (!orig_node) { 1444 if (!orig_node)
1304 spin_unlock_bh(&bat_priv->orig_hash_lock); 1445 goto rcu_unlock;
1305 return NET_RX_DROP; 1446
1306 } 1447 rcu_read_unlock();
1448
1449 spin_lock_bh(&orig_node->bcast_seqno_lock);
1307 1450
1308 /* check whether the packet is a duplicate */ 1451 /* check whether the packet is a duplicate */
1309 if (get_bit_status(orig_node->bcast_bits, 1452 if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
1310 orig_node->last_bcast_seqno, 1453 ntohl(bcast_packet->seqno)))
1311 ntohl(bcast_packet->seqno))) { 1454 goto spin_unlock;
1312 spin_unlock_bh(&bat_priv->orig_hash_lock);
1313 return NET_RX_DROP;
1314 }
1315 1455
1316 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; 1456 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
1317 1457
1318 /* check whether the packet is old and the host just restarted. */ 1458 /* check whether the packet is old and the host just restarted. */
1319 if (window_protected(bat_priv, seq_diff, 1459 if (window_protected(bat_priv, seq_diff,
1320 &orig_node->bcast_seqno_reset)) { 1460 &orig_node->bcast_seqno_reset))
1321 spin_unlock_bh(&bat_priv->orig_hash_lock); 1461 goto spin_unlock;
1322 return NET_RX_DROP;
1323 }
1324 1462
1325 /* mark broadcast in flood history, update window position 1463 /* mark broadcast in flood history, update window position
1326 * if required. */ 1464 * if required. */
1327 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) 1465 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
1328 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); 1466 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
1329 1467
1330 spin_unlock_bh(&bat_priv->orig_hash_lock); 1468 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1469
1331 /* rebroadcast packet */ 1470 /* rebroadcast packet */
1332 add_bcast_packet_to_list(bat_priv, skb); 1471 add_bcast_packet_to_list(bat_priv, skb);
1333 1472
1334 /* broadcast for me */ 1473 /* broadcast for me */
1335 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1474 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1475 ret = NET_RX_SUCCESS;
1476 goto out;
1336 1477
1337 return NET_RX_SUCCESS; 1478rcu_unlock:
1479 rcu_read_unlock();
1480 goto out;
1481spin_unlock:
1482 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1483out:
1484 if (orig_node)
1485 orig_node_free_ref(orig_node);
1486 return ret;
1338} 1487}
1339 1488
1340int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) 1489int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1341{ 1490{
1342 struct vis_packet *vis_packet; 1491 struct vis_packet *vis_packet;
1343 struct ethhdr *ethhdr; 1492 struct ethhdr *ethhdr;
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index a09d16f0c3ab..b5a064c88a4f 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -22,24 +22,25 @@
22#ifndef _NET_BATMAN_ADV_ROUTING_H_ 22#ifndef _NET_BATMAN_ADV_ROUTING_H_
23#define _NET_BATMAN_ADV_ROUTING_H_ 23#define _NET_BATMAN_ADV_ROUTING_H_
24 24
25void slide_own_bcast_window(struct batman_if *batman_if); 25void slide_own_bcast_window(struct hard_iface *hard_iface);
26void receive_bat_packet(struct ethhdr *ethhdr, 26void receive_bat_packet(struct ethhdr *ethhdr,
27 struct batman_packet *batman_packet, 27 struct batman_packet *batman_packet,
28 unsigned char *hna_buff, int hna_buff_len, 28 unsigned char *hna_buff, int hna_buff_len,
29 struct batman_if *if_incoming); 29 struct hard_iface *if_incoming);
30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
31 struct neigh_node *neigh_node, unsigned char *hna_buff, 31 struct neigh_node *neigh_node, unsigned char *hna_buff,
32 int hna_buff_len); 32 int hna_buff_len);
33int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, 33int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
34 int hdr_size); 34int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
35int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if); 35int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
36int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if); 36int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
37int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if); 37int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
38int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); 38int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
39int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); 39int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
40int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
41struct neigh_node *find_router(struct bat_priv *bat_priv, 40struct neigh_node *find_router(struct bat_priv *bat_priv,
42 struct orig_node *orig_node, struct batman_if *recv_if); 41 struct orig_node *orig_node,
43void update_bonding_candidates(struct orig_node *orig_node); 42 struct hard_iface *recv_if);
43void bonding_candidate_del(struct orig_node *orig_node,
44 struct neigh_node *neigh_node);
44 45
45#endif /* _NET_BATMAN_ADV_ROUTING_H_ */ 46#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 831427694fc2..d49e54d932af 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -56,20 +56,20 @@ static unsigned long forward_send_time(void)
56/* send out an already prepared packet to the given address via the 56/* send out an already prepared packet to the given address via the
57 * specified batman interface */ 57 * specified batman interface */
58int send_skb_packet(struct sk_buff *skb, 58int send_skb_packet(struct sk_buff *skb,
59 struct batman_if *batman_if, 59 struct hard_iface *hard_iface,
60 uint8_t *dst_addr) 60 uint8_t *dst_addr)
61{ 61{
62 struct ethhdr *ethhdr; 62 struct ethhdr *ethhdr;
63 63
64 if (batman_if->if_status != IF_ACTIVE) 64 if (hard_iface->if_status != IF_ACTIVE)
65 goto send_skb_err; 65 goto send_skb_err;
66 66
67 if (unlikely(!batman_if->net_dev)) 67 if (unlikely(!hard_iface->net_dev))
68 goto send_skb_err; 68 goto send_skb_err;
69 69
70 if (!(batman_if->net_dev->flags & IFF_UP)) { 70 if (!(hard_iface->net_dev->flags & IFF_UP)) {
71 pr_warning("Interface %s is not up - can't send packet via " 71 pr_warning("Interface %s is not up - can't send packet via "
72 "that interface!\n", batman_if->net_dev->name); 72 "that interface!\n", hard_iface->net_dev->name);
73 goto send_skb_err; 73 goto send_skb_err;
74 } 74 }
75 75
@@ -80,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb,
80 skb_reset_mac_header(skb); 80 skb_reset_mac_header(skb);
81 81
82 ethhdr = (struct ethhdr *) skb_mac_header(skb); 82 ethhdr = (struct ethhdr *) skb_mac_header(skb);
83 memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN); 83 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
84 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); 84 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
85 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); 85 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
86 86
@@ -88,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb,
88 skb->priority = TC_PRIO_CONTROL; 88 skb->priority = TC_PRIO_CONTROL;
89 skb->protocol = __constant_htons(ETH_P_BATMAN); 89 skb->protocol = __constant_htons(ETH_P_BATMAN);
90 90
91 skb->dev = batman_if->net_dev; 91 skb->dev = hard_iface->net_dev;
92 92
93 /* dev_queue_xmit() returns a negative result on error. However on 93 /* dev_queue_xmit() returns a negative result on error. However on
94 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 94 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
@@ -102,16 +102,16 @@ send_skb_err:
102 102
103/* Send a packet to a given interface */ 103/* Send a packet to a given interface */
104static void send_packet_to_if(struct forw_packet *forw_packet, 104static void send_packet_to_if(struct forw_packet *forw_packet,
105 struct batman_if *batman_if) 105 struct hard_iface *hard_iface)
106{ 106{
107 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); 107 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
108 char *fwd_str; 108 char *fwd_str;
109 uint8_t packet_num; 109 uint8_t packet_num;
110 int16_t buff_pos; 110 int16_t buff_pos;
111 struct batman_packet *batman_packet; 111 struct batman_packet *batman_packet;
112 struct sk_buff *skb; 112 struct sk_buff *skb;
113 113
114 if (batman_if->if_status != IF_ACTIVE) 114 if (hard_iface->if_status != IF_ACTIVE)
115 return; 115 return;
116 116
117 packet_num = 0; 117 packet_num = 0;
@@ -126,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
126 /* we might have aggregated direct link packets with an 126 /* we might have aggregated direct link packets with an
127 * ordinary base packet */ 127 * ordinary base packet */
128 if ((forw_packet->direct_link_flags & (1 << packet_num)) && 128 if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
129 (forw_packet->if_incoming == batman_if)) 129 (forw_packet->if_incoming == hard_iface))
130 batman_packet->flags |= DIRECTLINK; 130 batman_packet->flags |= DIRECTLINK;
131 else 131 else
132 batman_packet->flags &= ~DIRECTLINK; 132 batman_packet->flags &= ~DIRECTLINK;
@@ -142,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
142 batman_packet->tq, batman_packet->ttl, 142 batman_packet->tq, batman_packet->ttl,
143 (batman_packet->flags & DIRECTLINK ? 143 (batman_packet->flags & DIRECTLINK ?
144 "on" : "off"), 144 "on" : "off"),
145 batman_if->net_dev->name, batman_if->net_dev->dev_addr); 145 hard_iface->net_dev->name,
146 hard_iface->net_dev->dev_addr);
146 147
147 buff_pos += sizeof(struct batman_packet) + 148 buff_pos += sizeof(struct batman_packet) +
148 (batman_packet->num_hna * ETH_ALEN); 149 (batman_packet->num_hna * ETH_ALEN);
@@ -154,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
154 /* create clone because function is called more than once */ 155 /* create clone because function is called more than once */
155 skb = skb_clone(forw_packet->skb, GFP_ATOMIC); 156 skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
156 if (skb) 157 if (skb)
157 send_skb_packet(skb, batman_if, broadcast_addr); 158 send_skb_packet(skb, hard_iface, broadcast_addr);
158} 159}
159 160
160/* send a batman packet */ 161/* send a batman packet */
161static void send_packet(struct forw_packet *forw_packet) 162static void send_packet(struct forw_packet *forw_packet)
162{ 163{
163 struct batman_if *batman_if; 164 struct hard_iface *hard_iface;
164 struct net_device *soft_iface; 165 struct net_device *soft_iface;
165 struct bat_priv *bat_priv; 166 struct bat_priv *bat_priv;
166 struct batman_packet *batman_packet = 167 struct batman_packet *batman_packet =
@@ -204,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet)
204 205
205 /* broadcast on every interface */ 206 /* broadcast on every interface */
206 rcu_read_lock(); 207 rcu_read_lock();
207 list_for_each_entry_rcu(batman_if, &if_list, list) { 208 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
208 if (batman_if->soft_iface != soft_iface) 209 if (hard_iface->soft_iface != soft_iface)
209 continue; 210 continue;
210 211
211 send_packet_to_if(forw_packet, batman_if); 212 send_packet_to_if(forw_packet, hard_iface);
212 } 213 }
213 rcu_read_unlock(); 214 rcu_read_unlock();
214} 215}
215 216
216static void rebuild_batman_packet(struct bat_priv *bat_priv, 217static void rebuild_batman_packet(struct bat_priv *bat_priv,
217 struct batman_if *batman_if) 218 struct hard_iface *hard_iface)
218{ 219{
219 int new_len; 220 int new_len;
220 unsigned char *new_buff; 221 unsigned char *new_buff;
@@ -226,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
226 227
227 /* keep old buffer if kmalloc should fail */ 228 /* keep old buffer if kmalloc should fail */
228 if (new_buff) { 229 if (new_buff) {
229 memcpy(new_buff, batman_if->packet_buff, 230 memcpy(new_buff, hard_iface->packet_buff,
230 sizeof(struct batman_packet)); 231 sizeof(struct batman_packet));
231 batman_packet = (struct batman_packet *)new_buff; 232 batman_packet = (struct batman_packet *)new_buff;
232 233
@@ -234,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
234 new_buff + sizeof(struct batman_packet), 235 new_buff + sizeof(struct batman_packet),
235 new_len - sizeof(struct batman_packet)); 236 new_len - sizeof(struct batman_packet));
236 237
237 kfree(batman_if->packet_buff); 238 kfree(hard_iface->packet_buff);
238 batman_if->packet_buff = new_buff; 239 hard_iface->packet_buff = new_buff;
239 batman_if->packet_len = new_len; 240 hard_iface->packet_len = new_len;
240 } 241 }
241} 242}
242 243
243void schedule_own_packet(struct batman_if *batman_if) 244void schedule_own_packet(struct hard_iface *hard_iface)
244{ 245{
245 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); 246 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
246 unsigned long send_time; 247 unsigned long send_time;
247 struct batman_packet *batman_packet; 248 struct batman_packet *batman_packet;
248 int vis_server; 249 int vis_server;
249 250
250 if ((batman_if->if_status == IF_NOT_IN_USE) || 251 if ((hard_iface->if_status == IF_NOT_IN_USE) ||
251 (batman_if->if_status == IF_TO_BE_REMOVED)) 252 (hard_iface->if_status == IF_TO_BE_REMOVED))
252 return; 253 return;
253 254
254 vis_server = atomic_read(&bat_priv->vis_mode); 255 vis_server = atomic_read(&bat_priv->vis_mode);
@@ -260,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if)
260 * outdated packets (especially uninitialized mac addresses) in the 261 * outdated packets (especially uninitialized mac addresses) in the
261 * packet queue 262 * packet queue
262 */ 263 */
263 if (batman_if->if_status == IF_TO_BE_ACTIVATED) 264 if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
264 batman_if->if_status = IF_ACTIVE; 265 hard_iface->if_status = IF_ACTIVE;
265 266
266 /* if local hna has changed and interface is a primary interface */ 267 /* if local hna has changed and interface is a primary interface */
267 if ((atomic_read(&bat_priv->hna_local_changed)) && 268 if ((atomic_read(&bat_priv->hna_local_changed)) &&
268 (batman_if == bat_priv->primary_if)) 269 (hard_iface == bat_priv->primary_if))
269 rebuild_batman_packet(bat_priv, batman_if); 270 rebuild_batman_packet(bat_priv, hard_iface);
270 271
271 /** 272 /**
272 * NOTE: packet_buff might just have been re-allocated in 273 * NOTE: packet_buff might just have been re-allocated in
273 * rebuild_batman_packet() 274 * rebuild_batman_packet()
274 */ 275 */
275 batman_packet = (struct batman_packet *)batman_if->packet_buff; 276 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
276 277
277 /* change sequence number to network order */ 278 /* change sequence number to network order */
278 batman_packet->seqno = 279 batman_packet->seqno =
279 htonl((uint32_t)atomic_read(&batman_if->seqno)); 280 htonl((uint32_t)atomic_read(&hard_iface->seqno));
280 281
281 if (vis_server == VIS_TYPE_SERVER_SYNC) 282 if (vis_server == VIS_TYPE_SERVER_SYNC)
282 batman_packet->flags |= VIS_SERVER; 283 batman_packet->flags |= VIS_SERVER;
283 else 284 else
284 batman_packet->flags &= ~VIS_SERVER; 285 batman_packet->flags &= ~VIS_SERVER;
285 286
286 if ((batman_if == bat_priv->primary_if) && 287 if ((hard_iface == bat_priv->primary_if) &&
287 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) 288 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
288 batman_packet->gw_flags = 289 batman_packet->gw_flags =
289 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 290 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
290 else 291 else
291 batman_packet->gw_flags = 0; 292 batman_packet->gw_flags = 0;
292 293
293 atomic_inc(&batman_if->seqno); 294 atomic_inc(&hard_iface->seqno);
294 295
295 slide_own_bcast_window(batman_if); 296 slide_own_bcast_window(hard_iface);
296 send_time = own_send_time(bat_priv); 297 send_time = own_send_time(bat_priv);
297 add_bat_packet_to_list(bat_priv, 298 add_bat_packet_to_list(bat_priv,
298 batman_if->packet_buff, 299 hard_iface->packet_buff,
299 batman_if->packet_len, 300 hard_iface->packet_len,
300 batman_if, 1, send_time); 301 hard_iface, 1, send_time);
301} 302}
302 303
303void schedule_forward_packet(struct orig_node *orig_node, 304void schedule_forward_packet(struct orig_node *orig_node,
304 struct ethhdr *ethhdr, 305 struct ethhdr *ethhdr,
305 struct batman_packet *batman_packet, 306 struct batman_packet *batman_packet,
306 uint8_t directlink, int hna_buff_len, 307 uint8_t directlink, int hna_buff_len,
307 struct batman_if *if_incoming) 308 struct hard_iface *if_incoming)
308{ 309{
309 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 310 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
310 unsigned char in_tq, in_ttl, tq_avg = 0; 311 unsigned char in_tq, in_ttl, tq_avg = 0;
@@ -326,7 +327,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
326 if ((orig_node->router) && (orig_node->router->tq_avg != 0)) { 327 if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
327 328
328 /* rebroadcast ogm of best ranking neighbor as is */ 329 /* rebroadcast ogm of best ranking neighbor as is */
329 if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) { 330 if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
330 batman_packet->tq = orig_node->router->tq_avg; 331 batman_packet->tq = orig_node->router->tq_avg;
331 332
332 if (orig_node->router->last_ttl) 333 if (orig_node->router->last_ttl)
@@ -443,7 +444,7 @@ out:
443 444
444static void send_outstanding_bcast_packet(struct work_struct *work) 445static void send_outstanding_bcast_packet(struct work_struct *work)
445{ 446{
446 struct batman_if *batman_if; 447 struct hard_iface *hard_iface;
447 struct delayed_work *delayed_work = 448 struct delayed_work *delayed_work =
448 container_of(work, struct delayed_work, work); 449 container_of(work, struct delayed_work, work);
449 struct forw_packet *forw_packet = 450 struct forw_packet *forw_packet =
@@ -461,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
461 462
462 /* rebroadcast packet */ 463 /* rebroadcast packet */
463 rcu_read_lock(); 464 rcu_read_lock();
464 list_for_each_entry_rcu(batman_if, &if_list, list) { 465 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
465 if (batman_if->soft_iface != soft_iface) 466 if (hard_iface->soft_iface != soft_iface)
466 continue; 467 continue;
467 468
468 /* send a copy of the saved skb */ 469 /* send a copy of the saved skb */
469 skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); 470 skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
470 if (skb1) 471 if (skb1)
471 send_skb_packet(skb1, batman_if, broadcast_addr); 472 send_skb_packet(skb1, hard_iface, broadcast_addr);
472 } 473 }
473 rcu_read_unlock(); 474 rcu_read_unlock();
474 475
@@ -521,15 +522,15 @@ out:
521} 522}
522 523
523void purge_outstanding_packets(struct bat_priv *bat_priv, 524void purge_outstanding_packets(struct bat_priv *bat_priv,
524 struct batman_if *batman_if) 525 struct hard_iface *hard_iface)
525{ 526{
526 struct forw_packet *forw_packet; 527 struct forw_packet *forw_packet;
527 struct hlist_node *tmp_node, *safe_tmp_node; 528 struct hlist_node *tmp_node, *safe_tmp_node;
528 529
529 if (batman_if) 530 if (hard_iface)
530 bat_dbg(DBG_BATMAN, bat_priv, 531 bat_dbg(DBG_BATMAN, bat_priv,
531 "purge_outstanding_packets(): %s\n", 532 "purge_outstanding_packets(): %s\n",
532 batman_if->net_dev->name); 533 hard_iface->net_dev->name);
533 else 534 else
534 bat_dbg(DBG_BATMAN, bat_priv, 535 bat_dbg(DBG_BATMAN, bat_priv,
535 "purge_outstanding_packets()\n"); 536 "purge_outstanding_packets()\n");
@@ -543,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
543 * if purge_outstanding_packets() was called with an argmument 544 * if purge_outstanding_packets() was called with an argmument
544 * we delete only packets belonging to the given interface 545 * we delete only packets belonging to the given interface
545 */ 546 */
546 if ((batman_if) && 547 if ((hard_iface) &&
547 (forw_packet->if_incoming != batman_if)) 548 (forw_packet->if_incoming != hard_iface))
548 continue; 549 continue;
549 550
550 spin_unlock_bh(&bat_priv->forw_bcast_list_lock); 551 spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -567,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
567 * if purge_outstanding_packets() was called with an argmument 568 * if purge_outstanding_packets() was called with an argmument
568 * we delete only packets belonging to the given interface 569 * we delete only packets belonging to the given interface
569 */ 570 */
570 if ((batman_if) && 571 if ((hard_iface) &&
571 (forw_packet->if_incoming != batman_if)) 572 (forw_packet->if_incoming != hard_iface))
572 continue; 573 continue;
573 574
574 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 575 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index b68c272cb84f..7b2ff19c05e7 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -23,17 +23,17 @@
23#define _NET_BATMAN_ADV_SEND_H_ 23#define _NET_BATMAN_ADV_SEND_H_
24 24
25int send_skb_packet(struct sk_buff *skb, 25int send_skb_packet(struct sk_buff *skb,
26 struct batman_if *batman_if, 26 struct hard_iface *hard_iface,
27 uint8_t *dst_addr); 27 uint8_t *dst_addr);
28void schedule_own_packet(struct batman_if *batman_if); 28void schedule_own_packet(struct hard_iface *hard_iface);
29void schedule_forward_packet(struct orig_node *orig_node, 29void schedule_forward_packet(struct orig_node *orig_node,
30 struct ethhdr *ethhdr, 30 struct ethhdr *ethhdr,
31 struct batman_packet *batman_packet, 31 struct batman_packet *batman_packet,
32 uint8_t directlink, int hna_buff_len, 32 uint8_t directlink, int hna_buff_len,
33 struct batman_if *if_outgoing); 33 struct hard_iface *if_outgoing);
34int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); 34int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
35void send_outstanding_bat_packet(struct work_struct *work); 35void send_outstanding_bat_packet(struct work_struct *work);
36void purge_outstanding_packets(struct bat_priv *bat_priv, 36void purge_outstanding_packets(struct bat_priv *bat_priv,
37 struct batman_if *batman_if); 37 struct hard_iface *hard_iface);
38 38
39#endif /* _NET_BATMAN_ADV_SEND_H_ */ 39#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index bd088f877e38..9ed26140a269 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -29,14 +29,12 @@
29#include "hash.h" 29#include "hash.h"
30#include "gateway_common.h" 30#include "gateway_common.h"
31#include "gateway_client.h" 31#include "gateway_client.h"
32#include "send.h"
33#include "bat_sysfs.h" 32#include "bat_sysfs.h"
34#include <linux/slab.h> 33#include <linux/slab.h>
35#include <linux/ethtool.h> 34#include <linux/ethtool.h>
36#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
37#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
38#include "unicast.h" 37#include "unicast.h"
39#include "routing.h"
40 38
41 39
42static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); 40static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -78,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
78 return 0; 76 return 0;
79} 77}
80 78
81static void softif_neigh_free_ref(struct kref *refcount) 79static void softif_neigh_free_rcu(struct rcu_head *rcu)
82{ 80{
83 struct softif_neigh *softif_neigh; 81 struct softif_neigh *softif_neigh;
84 82
85 softif_neigh = container_of(refcount, struct softif_neigh, refcount); 83 softif_neigh = container_of(rcu, struct softif_neigh, rcu);
86 kfree(softif_neigh); 84 kfree(softif_neigh);
87} 85}
88 86
89static void softif_neigh_free_rcu(struct rcu_head *rcu) 87static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
90{ 88{
91 struct softif_neigh *softif_neigh; 89 if (atomic_dec_and_test(&softif_neigh->refcount))
92 90 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
93 softif_neigh = container_of(rcu, struct softif_neigh, rcu);
94 kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
95} 91}
96 92
97void softif_neigh_purge(struct bat_priv *bat_priv) 93void softif_neigh_purge(struct bat_priv *bat_priv)
@@ -118,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
118 softif_neigh->addr, softif_neigh->vid); 114 softif_neigh->addr, softif_neigh->vid);
119 softif_neigh_tmp = bat_priv->softif_neigh; 115 softif_neigh_tmp = bat_priv->softif_neigh;
120 bat_priv->softif_neigh = NULL; 116 bat_priv->softif_neigh = NULL;
121 kref_put(&softif_neigh_tmp->refcount, 117 softif_neigh_free_ref(softif_neigh_tmp);
122 softif_neigh_free_ref);
123 } 118 }
124 119
125 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); 120 softif_neigh_free_ref(softif_neigh);
126 } 121 }
127 122
128 spin_unlock_bh(&bat_priv->softif_neigh_lock); 123 spin_unlock_bh(&bat_priv->softif_neigh_lock);
@@ -137,14 +132,17 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
137 rcu_read_lock(); 132 rcu_read_lock();
138 hlist_for_each_entry_rcu(softif_neigh, node, 133 hlist_for_each_entry_rcu(softif_neigh, node,
139 &bat_priv->softif_neigh_list, list) { 134 &bat_priv->softif_neigh_list, list) {
140 if (memcmp(softif_neigh->addr, addr, ETH_ALEN) != 0) 135 if (!compare_eth(softif_neigh->addr, addr))
141 continue; 136 continue;
142 137
143 if (softif_neigh->vid != vid) 138 if (softif_neigh->vid != vid)
144 continue; 139 continue;
145 140
141 if (!atomic_inc_not_zero(&softif_neigh->refcount))
142 continue;
143
146 softif_neigh->last_seen = jiffies; 144 softif_neigh->last_seen = jiffies;
147 goto found; 145 goto out;
148 } 146 }
149 147
150 softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC); 148 softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
@@ -154,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
154 memcpy(softif_neigh->addr, addr, ETH_ALEN); 152 memcpy(softif_neigh->addr, addr, ETH_ALEN);
155 softif_neigh->vid = vid; 153 softif_neigh->vid = vid;
156 softif_neigh->last_seen = jiffies; 154 softif_neigh->last_seen = jiffies;
157 kref_init(&softif_neigh->refcount); 155 /* initialize with 2 - caller decrements counter by one */
156 atomic_set(&softif_neigh->refcount, 2);
158 157
159 INIT_HLIST_NODE(&softif_neigh->list); 158 INIT_HLIST_NODE(&softif_neigh->list);
160 spin_lock_bh(&bat_priv->softif_neigh_lock); 159 spin_lock_bh(&bat_priv->softif_neigh_lock);
161 hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list); 160 hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
162 spin_unlock_bh(&bat_priv->softif_neigh_lock); 161 spin_unlock_bh(&bat_priv->softif_neigh_lock);
163 162
164found:
165 kref_get(&softif_neigh->refcount);
166out: 163out:
167 rcu_read_unlock(); 164 rcu_read_unlock();
168 return softif_neigh; 165 return softif_neigh;
@@ -174,8 +171,6 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
174 struct bat_priv *bat_priv = netdev_priv(net_dev); 171 struct bat_priv *bat_priv = netdev_priv(net_dev);
175 struct softif_neigh *softif_neigh; 172 struct softif_neigh *softif_neigh;
176 struct hlist_node *node; 173 struct hlist_node *node;
177 size_t buf_size, pos;
178 char *buff;
179 174
180 if (!bat_priv->primary_if) { 175 if (!bat_priv->primary_if) {
181 return seq_printf(seq, "BATMAN mesh %s disabled - " 176 return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -185,33 +180,15 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
185 180
186 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); 181 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
187 182
188 buf_size = 1;
189 /* Estimate length for: " xx:xx:xx:xx:xx:xx\n" */
190 rcu_read_lock(); 183 rcu_read_lock();
191 hlist_for_each_entry_rcu(softif_neigh, node, 184 hlist_for_each_entry_rcu(softif_neigh, node,
192 &bat_priv->softif_neigh_list, list) 185 &bat_priv->softif_neigh_list, list)
193 buf_size += 30; 186 seq_printf(seq, "%s %pM (vid: %d)\n",
194 rcu_read_unlock();
195
196 buff = kmalloc(buf_size, GFP_ATOMIC);
197 if (!buff)
198 return -ENOMEM;
199
200 buff[0] = '\0';
201 pos = 0;
202
203 rcu_read_lock();
204 hlist_for_each_entry_rcu(softif_neigh, node,
205 &bat_priv->softif_neigh_list, list) {
206 pos += snprintf(buff + pos, 31, "%s %pM (vid: %d)\n",
207 bat_priv->softif_neigh == softif_neigh 187 bat_priv->softif_neigh == softif_neigh
208 ? "=>" : " ", softif_neigh->addr, 188 ? "=>" : " ", softif_neigh->addr,
209 softif_neigh->vid); 189 softif_neigh->vid);
210 }
211 rcu_read_unlock(); 190 rcu_read_unlock();
212 191
213 seq_printf(seq, "%s", buff);
214 kfree(buff);
215 return 0; 192 return 0;
216} 193}
217 194
@@ -266,7 +243,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
266 softif_neigh->addr, softif_neigh->vid); 243 softif_neigh->addr, softif_neigh->vid);
267 softif_neigh_tmp = bat_priv->softif_neigh; 244 softif_neigh_tmp = bat_priv->softif_neigh;
268 bat_priv->softif_neigh = softif_neigh; 245 bat_priv->softif_neigh = softif_neigh;
269 kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref); 246 softif_neigh_free_ref(softif_neigh_tmp);
270 /* we need to hold the additional reference */ 247 /* we need to hold the additional reference */
271 goto err; 248 goto err;
272 } 249 }
@@ -284,7 +261,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
284 } 261 }
285 262
286out: 263out:
287 kref_put(&softif_neigh->refcount, softif_neigh_free_ref); 264 softif_neigh_free_ref(softif_neigh);
288err: 265err:
289 kfree_skb(skb); 266 kfree_skb(skb);
290 return; 267 return;
@@ -437,7 +414,7 @@ end:
437} 414}
438 415
439void interface_rx(struct net_device *soft_iface, 416void interface_rx(struct net_device *soft_iface,
440 struct sk_buff *skb, struct batman_if *recv_if, 417 struct sk_buff *skb, struct hard_iface *recv_if,
441 int hdr_size) 418 int hdr_size)
442{ 419{
443 struct bat_priv *bat_priv = netdev_priv(soft_iface); 420 struct bat_priv *bat_priv = netdev_priv(soft_iface);
@@ -485,7 +462,7 @@ void interface_rx(struct net_device *soft_iface,
485 462
486 memcpy(unicast_packet->dest, 463 memcpy(unicast_packet->dest,
487 bat_priv->softif_neigh->addr, ETH_ALEN); 464 bat_priv->softif_neigh->addr, ETH_ALEN);
488 ret = route_unicast_packet(skb, recv_if, hdr_size); 465 ret = route_unicast_packet(skb, recv_if);
489 if (ret == NET_RX_DROP) 466 if (ret == NET_RX_DROP)
490 goto dropped; 467 goto dropped;
491 468
@@ -645,6 +622,19 @@ void softif_destroy(struct net_device *soft_iface)
645 unregister_netdevice(soft_iface); 622 unregister_netdevice(soft_iface);
646} 623}
647 624
625int softif_is_valid(struct net_device *net_dev)
626{
627#ifdef HAVE_NET_DEVICE_OPS
628 if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
629 return 1;
630#else
631 if (net_dev->hard_start_xmit == interface_tx)
632 return 1;
633#endif
634
635 return 0;
636}
637
648/* ethtool */ 638/* ethtool */
649static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 639static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
650{ 640{
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index e7b0e1a34a55..4789b6f2a0b3 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -27,9 +27,10 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
27void softif_neigh_purge(struct bat_priv *bat_priv); 27void softif_neigh_purge(struct bat_priv *bat_priv);
28int interface_tx(struct sk_buff *skb, struct net_device *soft_iface); 28int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
29void interface_rx(struct net_device *soft_iface, 29void interface_rx(struct net_device *soft_iface,
30 struct sk_buff *skb, struct batman_if *recv_if, 30 struct sk_buff *skb, struct hard_iface *recv_if,
31 int hdr_size); 31 int hdr_size);
32struct net_device *softif_create(char *name); 32struct net_device *softif_create(char *name);
33void softif_destroy(struct net_device *soft_iface); 33void softif_destroy(struct net_device *soft_iface);
34int softif_is_valid(struct net_device *net_dev);
34 35
35#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ 36#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 7fb6726ccbdd..8d15b48d1692 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -30,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
30 struct hna_global_entry *hna_global_entry, 30 struct hna_global_entry *hna_global_entry,
31 char *message); 31 char *message);
32 32
33/* returns 1 if they are the same mac addr */
34static int compare_lhna(struct hlist_node *node, void *data2)
35{
36 void *data1 = container_of(node, struct hna_local_entry, hash_entry);
37
38 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
39}
40
41/* returns 1 if they are the same mac addr */
42static int compare_ghna(struct hlist_node *node, void *data2)
43{
44 void *data1 = container_of(node, struct hna_global_entry, hash_entry);
45
46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
47}
48
33static void hna_local_start_timer(struct bat_priv *bat_priv) 49static void hna_local_start_timer(struct bat_priv *bat_priv)
34{ 50{
35 INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge); 51 INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
36 queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ); 52 queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
37} 53}
38 54
55static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
56 void *data)
57{
58 struct hashtable_t *hash = bat_priv->hna_local_hash;
59 struct hlist_head *head;
60 struct hlist_node *node;
61 struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
62 int index;
63
64 if (!hash)
65 return NULL;
66
67 index = choose_orig(data, hash->size);
68 head = &hash->table[index];
69
70 rcu_read_lock();
71 hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
72 if (!compare_eth(hna_local_entry, data))
73 continue;
74
75 hna_local_entry_tmp = hna_local_entry;
76 break;
77 }
78 rcu_read_unlock();
79
80 return hna_local_entry_tmp;
81}
82
83static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
84 void *data)
85{
86 struct hashtable_t *hash = bat_priv->hna_global_hash;
87 struct hlist_head *head;
88 struct hlist_node *node;
89 struct hna_global_entry *hna_global_entry;
90 struct hna_global_entry *hna_global_entry_tmp = NULL;
91 int index;
92
93 if (!hash)
94 return NULL;
95
96 index = choose_orig(data, hash->size);
97 head = &hash->table[index];
98
99 rcu_read_lock();
100 hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
101 if (!compare_eth(hna_global_entry, data))
102 continue;
103
104 hna_global_entry_tmp = hna_global_entry;
105 break;
106 }
107 rcu_read_unlock();
108
109 return hna_global_entry_tmp;
110}
111
39int hna_local_init(struct bat_priv *bat_priv) 112int hna_local_init(struct bat_priv *bat_priv)
40{ 113{
41 if (bat_priv->hna_local_hash) 114 if (bat_priv->hna_local_hash)
@@ -60,10 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
60 int required_bytes; 133 int required_bytes;
61 134
62 spin_lock_bh(&bat_priv->hna_lhash_lock); 135 spin_lock_bh(&bat_priv->hna_lhash_lock);
63 hna_local_entry = 136 hna_local_entry = hna_local_hash_find(bat_priv, addr);
64 ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
65 compare_orig, choose_orig,
66 addr));
67 spin_unlock_bh(&bat_priv->hna_lhash_lock); 137 spin_unlock_bh(&bat_priv->hna_lhash_lock);
68 138
69 if (hna_local_entry) { 139 if (hna_local_entry) {
@@ -99,15 +169,15 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
99 hna_local_entry->last_seen = jiffies; 169 hna_local_entry->last_seen = jiffies;
100 170
101 /* the batman interface mac address should never be purged */ 171 /* the batman interface mac address should never be purged */
102 if (compare_orig(addr, soft_iface->dev_addr)) 172 if (compare_eth(addr, soft_iface->dev_addr))
103 hna_local_entry->never_purge = 1; 173 hna_local_entry->never_purge = 1;
104 else 174 else
105 hna_local_entry->never_purge = 0; 175 hna_local_entry->never_purge = 0;
106 176
107 spin_lock_bh(&bat_priv->hna_lhash_lock); 177 spin_lock_bh(&bat_priv->hna_lhash_lock);
108 178
109 hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig, 179 hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
110 hna_local_entry); 180 hna_local_entry, &hna_local_entry->hash_entry);
111 bat_priv->num_local_hna++; 181 bat_priv->num_local_hna++;
112 atomic_set(&bat_priv->hna_local_changed, 1); 182 atomic_set(&bat_priv->hna_local_changed, 1);
113 183
@@ -116,9 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
116 /* remove address from global hash if present */ 186 /* remove address from global hash if present */
117 spin_lock_bh(&bat_priv->hna_ghash_lock); 187 spin_lock_bh(&bat_priv->hna_ghash_lock);
118 188
119 hna_global_entry = ((struct hna_global_entry *) 189 hna_global_entry = hna_global_hash_find(bat_priv, addr);
120 hash_find(bat_priv->hna_global_hash,
121 compare_orig, choose_orig, addr));
122 190
123 if (hna_global_entry) 191 if (hna_global_entry)
124 _hna_global_del_orig(bat_priv, hna_global_entry, 192 _hna_global_del_orig(bat_priv, hna_global_entry,
@@ -132,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
132{ 200{
133 struct hashtable_t *hash = bat_priv->hna_local_hash; 201 struct hashtable_t *hash = bat_priv->hna_local_hash;
134 struct hna_local_entry *hna_local_entry; 202 struct hna_local_entry *hna_local_entry;
135 struct element_t *bucket; 203 struct hlist_node *node;
136 int i;
137 struct hlist_node *walk;
138 struct hlist_head *head; 204 struct hlist_head *head;
139 int count = 0; 205 int i, count = 0;
140 206
141 spin_lock_bh(&bat_priv->hna_lhash_lock); 207 spin_lock_bh(&bat_priv->hna_lhash_lock);
142 208
143 for (i = 0; i < hash->size; i++) { 209 for (i = 0; i < hash->size; i++) {
144 head = &hash->table[i]; 210 head = &hash->table[i];
145 211
146 hlist_for_each_entry(bucket, walk, head, hlist) { 212 rcu_read_lock();
147 213 hlist_for_each_entry_rcu(hna_local_entry, node,
214 head, hash_entry) {
148 if (buff_len < (count + 1) * ETH_ALEN) 215 if (buff_len < (count + 1) * ETH_ALEN)
149 break; 216 break;
150 217
151 hna_local_entry = bucket->data;
152 memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr, 218 memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
153 ETH_ALEN); 219 ETH_ALEN);
154 220
155 count++; 221 count++;
156 } 222 }
223 rcu_read_unlock();
157 } 224 }
158 225
159 /* if we did not get all new local hnas see you next time ;-) */ 226 /* if we did not get all new local hnas see you next time ;-) */
@@ -170,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
170 struct bat_priv *bat_priv = netdev_priv(net_dev); 237 struct bat_priv *bat_priv = netdev_priv(net_dev);
171 struct hashtable_t *hash = bat_priv->hna_local_hash; 238 struct hashtable_t *hash = bat_priv->hna_local_hash;
172 struct hna_local_entry *hna_local_entry; 239 struct hna_local_entry *hna_local_entry;
173 int i; 240 struct hlist_node *node;
174 struct hlist_node *walk;
175 struct hlist_head *head; 241 struct hlist_head *head;
176 struct element_t *bucket;
177 size_t buf_size, pos; 242 size_t buf_size, pos;
178 char *buff; 243 char *buff;
244 int i;
179 245
180 if (!bat_priv->primary_if) { 246 if (!bat_priv->primary_if) {
181 return seq_printf(seq, "BATMAN mesh %s disabled - " 247 return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -194,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
194 for (i = 0; i < hash->size; i++) { 260 for (i = 0; i < hash->size; i++) {
195 head = &hash->table[i]; 261 head = &hash->table[i];
196 262
197 hlist_for_each(walk, head) 263 rcu_read_lock();
264 __hlist_for_each_rcu(node, head)
198 buf_size += 21; 265 buf_size += 21;
266 rcu_read_unlock();
199 } 267 }
200 268
201 buff = kmalloc(buf_size, GFP_ATOMIC); 269 buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -203,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
203 spin_unlock_bh(&bat_priv->hna_lhash_lock); 271 spin_unlock_bh(&bat_priv->hna_lhash_lock);
204 return -ENOMEM; 272 return -ENOMEM;
205 } 273 }
274
206 buff[0] = '\0'; 275 buff[0] = '\0';
207 pos = 0; 276 pos = 0;
208 277
209 for (i = 0; i < hash->size; i++) { 278 for (i = 0; i < hash->size; i++) {
210 head = &hash->table[i]; 279 head = &hash->table[i];
211 280
212 hlist_for_each_entry(bucket, walk, head, hlist) { 281 rcu_read_lock();
213 hna_local_entry = bucket->data; 282 hlist_for_each_entry_rcu(hna_local_entry, node,
214 283 head, hash_entry) {
215 pos += snprintf(buff + pos, 22, " * %pM\n", 284 pos += snprintf(buff + pos, 22, " * %pM\n",
216 hna_local_entry->addr); 285 hna_local_entry->addr);
217 } 286 }
287 rcu_read_unlock();
218 } 288 }
219 289
220 spin_unlock_bh(&bat_priv->hna_lhash_lock); 290 spin_unlock_bh(&bat_priv->hna_lhash_lock);
@@ -224,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
224 return 0; 294 return 0;
225} 295}
226 296
227static void _hna_local_del(void *data, void *arg) 297static void _hna_local_del(struct hlist_node *node, void *arg)
228{ 298{
229 struct bat_priv *bat_priv = (struct bat_priv *)arg; 299 struct bat_priv *bat_priv = (struct bat_priv *)arg;
300 void *data = container_of(node, struct hna_local_entry, hash_entry);
230 301
231 kfree(data); 302 kfree(data);
232 bat_priv->num_local_hna--; 303 bat_priv->num_local_hna--;
@@ -240,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv,
240 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n", 311 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
241 hna_local_entry->addr, message); 312 hna_local_entry->addr, message);
242 313
243 hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig, 314 hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
244 hna_local_entry->addr); 315 hna_local_entry->addr);
245 _hna_local_del(hna_local_entry, bat_priv); 316 _hna_local_del(&hna_local_entry->hash_entry, bat_priv);
246} 317}
247 318
248void hna_local_remove(struct bat_priv *bat_priv, 319void hna_local_remove(struct bat_priv *bat_priv,
@@ -252,9 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv,
252 323
253 spin_lock_bh(&bat_priv->hna_lhash_lock); 324 spin_lock_bh(&bat_priv->hna_lhash_lock);
254 325
255 hna_local_entry = (struct hna_local_entry *) 326 hna_local_entry = hna_local_hash_find(bat_priv, addr);
256 hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
257 addr);
258 327
259 if (hna_local_entry) 328 if (hna_local_entry)
260 hna_local_del(bat_priv, hna_local_entry, message); 329 hna_local_del(bat_priv, hna_local_entry, message);
@@ -270,27 +339,29 @@ static void hna_local_purge(struct work_struct *work)
270 container_of(delayed_work, struct bat_priv, hna_work); 339 container_of(delayed_work, struct bat_priv, hna_work);
271 struct hashtable_t *hash = bat_priv->hna_local_hash; 340 struct hashtable_t *hash = bat_priv->hna_local_hash;
272 struct hna_local_entry *hna_local_entry; 341 struct hna_local_entry *hna_local_entry;
273 int i; 342 struct hlist_node *node, *node_tmp;
274 struct hlist_node *walk, *safe;
275 struct hlist_head *head; 343 struct hlist_head *head;
276 struct element_t *bucket;
277 unsigned long timeout; 344 unsigned long timeout;
345 int i;
278 346
279 spin_lock_bh(&bat_priv->hna_lhash_lock); 347 spin_lock_bh(&bat_priv->hna_lhash_lock);
280 348
281 for (i = 0; i < hash->size; i++) { 349 for (i = 0; i < hash->size; i++) {
282 head = &hash->table[i]; 350 head = &hash->table[i];
283 351
284 hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) { 352 hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
285 hna_local_entry = bucket->data; 353 head, hash_entry) {
354 if (hna_local_entry->never_purge)
355 continue;
286 356
287 timeout = hna_local_entry->last_seen; 357 timeout = hna_local_entry->last_seen;
288 timeout += LOCAL_HNA_TIMEOUT * HZ; 358 timeout += LOCAL_HNA_TIMEOUT * HZ;
289 359
290 if ((!hna_local_entry->never_purge) && 360 if (time_before(jiffies, timeout))
291 time_after(jiffies, timeout)) 361 continue;
292 hna_local_del(bat_priv, hna_local_entry, 362
293 "address timed out"); 363 hna_local_del(bat_priv, hna_local_entry,
364 "address timed out");
294 } 365 }
295 } 366 }
296 367
@@ -334,9 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
334 spin_lock_bh(&bat_priv->hna_ghash_lock); 405 spin_lock_bh(&bat_priv->hna_ghash_lock);
335 406
336 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); 407 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
337 hna_global_entry = (struct hna_global_entry *) 408 hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
338 hash_find(bat_priv->hna_global_hash, compare_orig,
339 choose_orig, hna_ptr);
340 409
341 if (!hna_global_entry) { 410 if (!hna_global_entry) {
342 spin_unlock_bh(&bat_priv->hna_ghash_lock); 411 spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -356,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
356 hna_global_entry->addr, orig_node->orig); 425 hna_global_entry->addr, orig_node->orig);
357 426
358 spin_lock_bh(&bat_priv->hna_ghash_lock); 427 spin_lock_bh(&bat_priv->hna_ghash_lock);
359 hash_add(bat_priv->hna_global_hash, compare_orig, 428 hash_add(bat_priv->hna_global_hash, compare_ghna,
360 choose_orig, hna_global_entry); 429 choose_orig, hna_global_entry,
430 &hna_global_entry->hash_entry);
361 431
362 } 432 }
363 433
@@ -368,9 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
368 spin_lock_bh(&bat_priv->hna_lhash_lock); 438 spin_lock_bh(&bat_priv->hna_lhash_lock);
369 439
370 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); 440 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
371 hna_local_entry = (struct hna_local_entry *) 441 hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
372 hash_find(bat_priv->hna_local_hash, compare_orig,
373 choose_orig, hna_ptr);
374 442
375 if (hna_local_entry) 443 if (hna_local_entry)
376 hna_local_del(bat_priv, hna_local_entry, 444 hna_local_del(bat_priv, hna_local_entry,
@@ -400,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
400 struct bat_priv *bat_priv = netdev_priv(net_dev); 468 struct bat_priv *bat_priv = netdev_priv(net_dev);
401 struct hashtable_t *hash = bat_priv->hna_global_hash; 469 struct hashtable_t *hash = bat_priv->hna_global_hash;
402 struct hna_global_entry *hna_global_entry; 470 struct hna_global_entry *hna_global_entry;
403 int i; 471 struct hlist_node *node;
404 struct hlist_node *walk;
405 struct hlist_head *head; 472 struct hlist_head *head;
406 struct element_t *bucket;
407 size_t buf_size, pos; 473 size_t buf_size, pos;
408 char *buff; 474 char *buff;
475 int i;
409 476
410 if (!bat_priv->primary_if) { 477 if (!bat_priv->primary_if) {
411 return seq_printf(seq, "BATMAN mesh %s disabled - " 478 return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -423,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
423 for (i = 0; i < hash->size; i++) { 490 for (i = 0; i < hash->size; i++) {
424 head = &hash->table[i]; 491 head = &hash->table[i];
425 492
426 hlist_for_each(walk, head) 493 rcu_read_lock();
494 __hlist_for_each_rcu(node, head)
427 buf_size += 43; 495 buf_size += 43;
496 rcu_read_unlock();
428 } 497 }
429 498
430 buff = kmalloc(buf_size, GFP_ATOMIC); 499 buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -438,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
438 for (i = 0; i < hash->size; i++) { 507 for (i = 0; i < hash->size; i++) {
439 head = &hash->table[i]; 508 head = &hash->table[i];
440 509
441 hlist_for_each_entry(bucket, walk, head, hlist) { 510 rcu_read_lock();
442 hna_global_entry = bucket->data; 511 hlist_for_each_entry_rcu(hna_global_entry, node,
443 512 head, hash_entry) {
444 pos += snprintf(buff + pos, 44, 513 pos += snprintf(buff + pos, 44,
445 " * %pM via %pM\n", 514 " * %pM via %pM\n",
446 hna_global_entry->addr, 515 hna_global_entry->addr,
447 hna_global_entry->orig_node->orig); 516 hna_global_entry->orig_node->orig);
448 } 517 }
518 rcu_read_unlock();
449 } 519 }
450 520
451 spin_unlock_bh(&bat_priv->hna_ghash_lock); 521 spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -464,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
464 hna_global_entry->addr, hna_global_entry->orig_node->orig, 534 hna_global_entry->addr, hna_global_entry->orig_node->orig,
465 message); 535 message);
466 536
467 hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig, 537 hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
468 hna_global_entry->addr); 538 hna_global_entry->addr);
469 kfree(hna_global_entry); 539 kfree(hna_global_entry);
470} 540}
@@ -483,9 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
483 553
484 while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) { 554 while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
485 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN); 555 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
486 hna_global_entry = (struct hna_global_entry *) 556 hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
487 hash_find(bat_priv->hna_global_hash, compare_orig,
488 choose_orig, hna_ptr);
489 557
490 if ((hna_global_entry) && 558 if ((hna_global_entry) &&
491 (hna_global_entry->orig_node == orig_node)) 559 (hna_global_entry->orig_node == orig_node))
@@ -502,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
502 orig_node->hna_buff = NULL; 570 orig_node->hna_buff = NULL;
503} 571}
504 572
505static void hna_global_del(void *data, void *arg) 573static void hna_global_del(struct hlist_node *node, void *arg)
506{ 574{
575 void *data = container_of(node, struct hna_global_entry, hash_entry);
576
507 kfree(data); 577 kfree(data);
508} 578}
509 579
@@ -519,15 +589,20 @@ void hna_global_free(struct bat_priv *bat_priv)
519struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) 589struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
520{ 590{
521 struct hna_global_entry *hna_global_entry; 591 struct hna_global_entry *hna_global_entry;
592 struct orig_node *orig_node = NULL;
522 593
523 spin_lock_bh(&bat_priv->hna_ghash_lock); 594 spin_lock_bh(&bat_priv->hna_ghash_lock);
524 hna_global_entry = (struct hna_global_entry *) 595 hna_global_entry = hna_global_hash_find(bat_priv, addr);
525 hash_find(bat_priv->hna_global_hash,
526 compare_orig, choose_orig, addr);
527 spin_unlock_bh(&bat_priv->hna_ghash_lock);
528 596
529 if (!hna_global_entry) 597 if (!hna_global_entry)
530 return NULL; 598 goto out;
531 599
532 return hna_global_entry->orig_node; 600 if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
601 goto out;
602
603 orig_node = hna_global_entry->orig_node;
604
605out:
606 spin_unlock_bh(&bat_priv->hna_ghash_lock);
607 return orig_node;
533} 608}
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 7270405046e9..83445cf0cc9f 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -33,7 +33,7 @@
33 sizeof(struct bcast_packet)))) 33 sizeof(struct bcast_packet))))
34 34
35 35
36struct batman_if { 36struct hard_iface {
37 struct list_head list; 37 struct list_head list;
38 int16_t if_num; 38 int16_t if_num;
39 char if_status; 39 char if_status;
@@ -43,7 +43,7 @@ struct batman_if {
43 unsigned char *packet_buff; 43 unsigned char *packet_buff;
44 int packet_len; 44 int packet_len;
45 struct kobject *hardif_obj; 45 struct kobject *hardif_obj;
46 struct kref refcount; 46 atomic_t refcount;
47 struct packet_type batman_adv_ptype; 47 struct packet_type batman_adv_ptype;
48 struct net_device *soft_iface; 48 struct net_device *soft_iface;
49 struct rcu_head rcu; 49 struct rcu_head rcu;
@@ -70,8 +70,6 @@ struct orig_node {
70 struct neigh_node *router; 70 struct neigh_node *router;
71 unsigned long *bcast_own; 71 unsigned long *bcast_own;
72 uint8_t *bcast_own_sum; 72 uint8_t *bcast_own_sum;
73 uint8_t tq_own;
74 int tq_asym_penalty;
75 unsigned long last_valid; 73 unsigned long last_valid;
76 unsigned long bcast_seqno_reset; 74 unsigned long bcast_seqno_reset;
77 unsigned long batman_seqno_reset; 75 unsigned long batman_seqno_reset;
@@ -83,20 +81,28 @@ struct orig_node {
83 uint8_t last_ttl; 81 uint8_t last_ttl;
84 unsigned long bcast_bits[NUM_WORDS]; 82 unsigned long bcast_bits[NUM_WORDS];
85 uint32_t last_bcast_seqno; 83 uint32_t last_bcast_seqno;
86 struct list_head neigh_list; 84 struct hlist_head neigh_list;
87 struct list_head frag_list; 85 struct list_head frag_list;
86 spinlock_t neigh_list_lock; /* protects neighbor list */
87 atomic_t refcount;
88 struct rcu_head rcu;
89 struct hlist_node hash_entry;
90 struct bat_priv *bat_priv;
88 unsigned long last_frag_packet; 91 unsigned long last_frag_packet;
89 struct { 92 spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
90 uint8_t candidates; 93 * neigh_node->real_bits,
91 struct neigh_node *selected; 94 * neigh_node->real_packet_count */
92 } bond; 95 spinlock_t bcast_seqno_lock; /* protects bcast_bits,
96 * last_bcast_seqno */
97 atomic_t bond_candidates;
98 struct list_head bond_list;
93}; 99};
94 100
95struct gw_node { 101struct gw_node {
96 struct hlist_node list; 102 struct hlist_node list;
97 struct orig_node *orig_node; 103 struct orig_node *orig_node;
98 unsigned long deleted; 104 unsigned long deleted;
99 struct kref refcount; 105 atomic_t refcount;
100 struct rcu_head rcu; 106 struct rcu_head rcu;
101}; 107};
102 108
@@ -105,18 +111,20 @@ struct gw_node {
105 * @last_valid: when last packet via this neighbor was received 111 * @last_valid: when last packet via this neighbor was received
106 */ 112 */
107struct neigh_node { 113struct neigh_node {
108 struct list_head list; 114 struct hlist_node list;
109 uint8_t addr[ETH_ALEN]; 115 uint8_t addr[ETH_ALEN];
110 uint8_t real_packet_count; 116 uint8_t real_packet_count;
111 uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE]; 117 uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
112 uint8_t tq_index; 118 uint8_t tq_index;
113 uint8_t tq_avg; 119 uint8_t tq_avg;
114 uint8_t last_ttl; 120 uint8_t last_ttl;
115 struct neigh_node *next_bond_candidate; 121 struct list_head bonding_list;
116 unsigned long last_valid; 122 unsigned long last_valid;
117 unsigned long real_bits[NUM_WORDS]; 123 unsigned long real_bits[NUM_WORDS];
124 atomic_t refcount;
125 struct rcu_head rcu;
118 struct orig_node *orig_node; 126 struct orig_node *orig_node;
119 struct batman_if *if_incoming; 127 struct hard_iface *if_incoming;
120}; 128};
121 129
122 130
@@ -140,7 +148,7 @@ struct bat_priv {
140 struct hlist_head softif_neigh_list; 148 struct hlist_head softif_neigh_list;
141 struct softif_neigh *softif_neigh; 149 struct softif_neigh *softif_neigh;
142 struct debug_log *debug_log; 150 struct debug_log *debug_log;
143 struct batman_if *primary_if; 151 struct hard_iface *primary_if;
144 struct kobject *mesh_obj; 152 struct kobject *mesh_obj;
145 struct dentry *debug_dir; 153 struct dentry *debug_dir;
146 struct hlist_head forw_bat_list; 154 struct hlist_head forw_bat_list;
@@ -151,12 +159,11 @@ struct bat_priv {
151 struct hashtable_t *hna_local_hash; 159 struct hashtable_t *hna_local_hash;
152 struct hashtable_t *hna_global_hash; 160 struct hashtable_t *hna_global_hash;
153 struct hashtable_t *vis_hash; 161 struct hashtable_t *vis_hash;
154 spinlock_t orig_hash_lock; /* protects orig_hash */
155 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 162 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
156 spinlock_t forw_bcast_list_lock; /* protects */ 163 spinlock_t forw_bcast_list_lock; /* protects */
157 spinlock_t hna_lhash_lock; /* protects hna_local_hash */ 164 spinlock_t hna_lhash_lock; /* protects hna_local_hash */
158 spinlock_t hna_ghash_lock; /* protects hna_global_hash */ 165 spinlock_t hna_ghash_lock; /* protects hna_global_hash */
159 spinlock_t gw_list_lock; /* protects gw_list */ 166 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
160 spinlock_t vis_hash_lock; /* protects vis_hash */ 167 spinlock_t vis_hash_lock; /* protects vis_hash */
161 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 168 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
162 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ 169 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
@@ -165,7 +172,7 @@ struct bat_priv {
165 struct delayed_work hna_work; 172 struct delayed_work hna_work;
166 struct delayed_work orig_work; 173 struct delayed_work orig_work;
167 struct delayed_work vis_work; 174 struct delayed_work vis_work;
168 struct gw_node *curr_gw; 175 struct gw_node __rcu *curr_gw; /* rcu protected pointer */
169 struct vis_info *my_vis_info; 176 struct vis_info *my_vis_info;
170}; 177};
171 178
@@ -188,11 +195,13 @@ struct hna_local_entry {
188 uint8_t addr[ETH_ALEN]; 195 uint8_t addr[ETH_ALEN];
189 unsigned long last_seen; 196 unsigned long last_seen;
190 char never_purge; 197 char never_purge;
198 struct hlist_node hash_entry;
191}; 199};
192 200
193struct hna_global_entry { 201struct hna_global_entry {
194 uint8_t addr[ETH_ALEN]; 202 uint8_t addr[ETH_ALEN];
195 struct orig_node *orig_node; 203 struct orig_node *orig_node;
204 struct hlist_node hash_entry;
196}; 205};
197 206
198/** 207/**
@@ -208,7 +217,7 @@ struct forw_packet {
208 uint32_t direct_link_flags; 217 uint32_t direct_link_flags;
209 uint8_t num_packets; 218 uint8_t num_packets;
210 struct delayed_work delayed_work; 219 struct delayed_work delayed_work;
211 struct batman_if *if_incoming; 220 struct hard_iface *if_incoming;
212}; 221};
213 222
214/* While scanning for vis-entries of a particular vis-originator 223/* While scanning for vis-entries of a particular vis-originator
@@ -242,6 +251,7 @@ struct vis_info {
242 * from. we should not reply to them. */ 251 * from. we should not reply to them. */
243 struct list_head send_list; 252 struct list_head send_list;
244 struct kref refcount; 253 struct kref refcount;
254 struct hlist_node hash_entry;
245 struct bat_priv *bat_priv; 255 struct bat_priv *bat_priv;
246 /* this packet might be part of the vis send queue. */ 256 /* this packet might be part of the vis send queue. */
247 struct sk_buff *skb_packet; 257 struct sk_buff *skb_packet;
@@ -264,7 +274,7 @@ struct softif_neigh {
264 uint8_t addr[ETH_ALEN]; 274 uint8_t addr[ETH_ALEN];
265 unsigned long last_seen; 275 unsigned long last_seen;
266 short vid; 276 short vid;
267 struct kref refcount; 277 atomic_t refcount;
268 struct rcu_head rcu; 278 struct rcu_head rcu;
269}; 279};
270 280
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 121b11d2a23d..19f84bd443af 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -183,15 +183,10 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
183 (struct unicast_frag_packet *)skb->data; 183 (struct unicast_frag_packet *)skb->data;
184 184
185 *new_skb = NULL; 185 *new_skb = NULL;
186 spin_lock_bh(&bat_priv->orig_hash_lock);
187 orig_node = ((struct orig_node *)
188 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
189 unicast_packet->orig));
190 186
191 if (!orig_node) { 187 orig_node = orig_hash_find(bat_priv, unicast_packet->orig);
192 pr_debug("couldn't find originator in orig_hash\n"); 188 if (!orig_node)
193 goto out; 189 goto out;
194 }
195 190
196 orig_node->last_frag_packet = jiffies; 191 orig_node->last_frag_packet = jiffies;
197 192
@@ -215,14 +210,15 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
215 /* if not, merge failed */ 210 /* if not, merge failed */
216 if (*new_skb) 211 if (*new_skb)
217 ret = NET_RX_SUCCESS; 212 ret = NET_RX_SUCCESS;
218out:
219 spin_unlock_bh(&bat_priv->orig_hash_lock);
220 213
214out:
215 if (orig_node)
216 orig_node_free_ref(orig_node);
221 return ret; 217 return ret;
222} 218}
223 219
224int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 220int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
225 struct batman_if *batman_if, uint8_t dstaddr[]) 221 struct hard_iface *hard_iface, uint8_t dstaddr[])
226{ 222{
227 struct unicast_packet tmp_uc, *unicast_packet; 223 struct unicast_packet tmp_uc, *unicast_packet;
228 struct sk_buff *frag_skb; 224 struct sk_buff *frag_skb;
@@ -267,12 +263,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
267 frag1->flags = UNI_FRAG_HEAD | large_tail; 263 frag1->flags = UNI_FRAG_HEAD | large_tail;
268 frag2->flags = large_tail; 264 frag2->flags = large_tail;
269 265
270 seqno = atomic_add_return(2, &batman_if->frag_seqno); 266 seqno = atomic_add_return(2, &hard_iface->frag_seqno);
271 frag1->seqno = htons(seqno - 1); 267 frag1->seqno = htons(seqno - 1);
272 frag2->seqno = htons(seqno); 268 frag2->seqno = htons(seqno);
273 269
274 send_skb_packet(skb, batman_if, dstaddr); 270 send_skb_packet(skb, hard_iface, dstaddr);
275 send_skb_packet(frag_skb, batman_if, dstaddr); 271 send_skb_packet(frag_skb, hard_iface, dstaddr);
276 return NET_RX_SUCCESS; 272 return NET_RX_SUCCESS;
277 273
278drop_frag: 274drop_frag:
@@ -286,40 +282,37 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
286{ 282{
287 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 283 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
288 struct unicast_packet *unicast_packet; 284 struct unicast_packet *unicast_packet;
289 struct orig_node *orig_node = NULL; 285 struct orig_node *orig_node;
290 struct batman_if *batman_if; 286 struct neigh_node *neigh_node;
291 struct neigh_node *router;
292 int data_len = skb->len; 287 int data_len = skb->len;
293 uint8_t dstaddr[6]; 288 int ret = 1;
294
295 spin_lock_bh(&bat_priv->orig_hash_lock);
296 289
297 /* get routing information */ 290 /* get routing information */
298 if (is_multicast_ether_addr(ethhdr->h_dest)) 291 if (is_multicast_ether_addr(ethhdr->h_dest)) {
299 orig_node = (struct orig_node *)gw_get_selected(bat_priv); 292 orig_node = (struct orig_node *)gw_get_selected(bat_priv);
293 if (orig_node)
294 goto find_router;
295 }
300 296
301 /* check for hna host */ 297 /* check for hna host - increases orig_node refcount */
302 if (!orig_node) 298 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
303 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
304
305 router = find_router(bat_priv, orig_node, NULL);
306
307 if (!router)
308 goto unlock;
309
310 /* don't lock while sending the packets ... we therefore
311 * copy the required data before sending */
312 299
313 batman_if = router->if_incoming; 300find_router:
314 memcpy(dstaddr, router->addr, ETH_ALEN); 301 /**
302 * find_router():
303 * - if orig_node is NULL it returns NULL
304 * - increases neigh_nodes refcount if found.
305 */
306 neigh_node = find_router(bat_priv, orig_node, NULL);
315 307
316 spin_unlock_bh(&bat_priv->orig_hash_lock); 308 if (!neigh_node)
309 goto out;
317 310
318 if (batman_if->if_status != IF_ACTIVE) 311 if (neigh_node->if_incoming->if_status != IF_ACTIVE)
319 goto dropped; 312 goto out;
320 313
321 if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0) 314 if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
322 goto dropped; 315 goto out;
323 316
324 unicast_packet = (struct unicast_packet *)skb->data; 317 unicast_packet = (struct unicast_packet *)skb->data;
325 318
@@ -333,18 +326,24 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
333 326
334 if (atomic_read(&bat_priv->fragmentation) && 327 if (atomic_read(&bat_priv->fragmentation) &&
335 data_len + sizeof(struct unicast_packet) > 328 data_len + sizeof(struct unicast_packet) >
336 batman_if->net_dev->mtu) { 329 neigh_node->if_incoming->net_dev->mtu) {
337 /* send frag skb decreases ttl */ 330 /* send frag skb decreases ttl */
338 unicast_packet->ttl++; 331 unicast_packet->ttl++;
339 return frag_send_skb(skb, bat_priv, batman_if, 332 ret = frag_send_skb(skb, bat_priv,
340 dstaddr); 333 neigh_node->if_incoming, neigh_node->addr);
334 goto out;
341 } 335 }
342 send_skb_packet(skb, batman_if, dstaddr);
343 return 0;
344 336
345unlock: 337 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
346 spin_unlock_bh(&bat_priv->orig_hash_lock); 338 ret = 0;
347dropped: 339 goto out;
348 kfree_skb(skb); 340
349 return 1; 341out:
342 if (neigh_node)
343 neigh_node_free_ref(neigh_node);
344 if (orig_node)
345 orig_node_free_ref(orig_node);
346 if (ret == 1)
347 kfree_skb(skb);
348 return ret;
350} 349}
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 8897308281d4..16ad7a9242b5 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -32,7 +32,7 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
32void frag_list_free(struct list_head *head); 32void frag_list_free(struct list_head *head);
33int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); 33int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
34int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 34int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
35 struct batman_if *batman_if, uint8_t dstaddr[]); 35 struct hard_iface *hard_iface, uint8_t dstaddr[]);
36 36
37static inline int frag_can_reassemble(struct sk_buff *skb, int mtu) 37static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
38{ 38{
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 7db9ad82cc00..f90212f42082 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -68,15 +68,16 @@ static void free_info(struct kref *ref)
68} 68}
69 69
70/* Compare two vis packets, used by the hashing algorithm */ 70/* Compare two vis packets, used by the hashing algorithm */
71static int vis_info_cmp(void *data1, void *data2) 71static int vis_info_cmp(struct hlist_node *node, void *data2)
72{ 72{
73 struct vis_info *d1, *d2; 73 struct vis_info *d1, *d2;
74 struct vis_packet *p1, *p2; 74 struct vis_packet *p1, *p2;
75 d1 = data1; 75
76 d1 = container_of(node, struct vis_info, hash_entry);
76 d2 = data2; 77 d2 = data2;
77 p1 = (struct vis_packet *)d1->skb_packet->data; 78 p1 = (struct vis_packet *)d1->skb_packet->data;
78 p2 = (struct vis_packet *)d2->skb_packet->data; 79 p2 = (struct vis_packet *)d2->skb_packet->data;
79 return compare_orig(p1->vis_orig, p2->vis_orig); 80 return compare_eth(p1->vis_orig, p2->vis_orig);
80} 81}
81 82
82/* hash function to choose an entry in a hash table of given size */ 83/* hash function to choose an entry in a hash table of given size */
@@ -104,6 +105,34 @@ static int vis_info_choose(void *data, int size)
104 return hash % size; 105 return hash % size;
105} 106}
106 107
108static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
109 void *data)
110{
111 struct hashtable_t *hash = bat_priv->vis_hash;
112 struct hlist_head *head;
113 struct hlist_node *node;
114 struct vis_info *vis_info, *vis_info_tmp = NULL;
115 int index;
116
117 if (!hash)
118 return NULL;
119
120 index = vis_info_choose(data, hash->size);
121 head = &hash->table[index];
122
123 rcu_read_lock();
124 hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
125 if (!vis_info_cmp(node, data))
126 continue;
127
128 vis_info_tmp = vis_info;
129 break;
130 }
131 rcu_read_unlock();
132
133 return vis_info_tmp;
134}
135
107/* insert interface to the list of interfaces of one originator, if it 136/* insert interface to the list of interfaces of one originator, if it
108 * does not already exist in the list */ 137 * does not already exist in the list */
109static void vis_data_insert_interface(const uint8_t *interface, 138static void vis_data_insert_interface(const uint8_t *interface,
@@ -114,7 +143,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
114 struct hlist_node *pos; 143 struct hlist_node *pos;
115 144
116 hlist_for_each_entry(entry, pos, if_list, list) { 145 hlist_for_each_entry(entry, pos, if_list, list) {
117 if (compare_orig(entry->addr, (void *)interface)) 146 if (compare_eth(entry->addr, (void *)interface))
118 return; 147 return;
119 } 148 }
120 149
@@ -166,7 +195,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
166 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ 195 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
167 if (primary && entry->quality == 0) 196 if (primary && entry->quality == 0)
168 return sprintf(buff, "HNA %pM, ", entry->dest); 197 return sprintf(buff, "HNA %pM, ", entry->dest);
169 else if (compare_orig(entry->src, src)) 198 else if (compare_eth(entry->src, src))
170 return sprintf(buff, "TQ %pM %d, ", entry->dest, 199 return sprintf(buff, "TQ %pM %d, ", entry->dest,
171 entry->quality); 200 entry->quality);
172 201
@@ -175,9 +204,8 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
175 204
176int vis_seq_print_text(struct seq_file *seq, void *offset) 205int vis_seq_print_text(struct seq_file *seq, void *offset)
177{ 206{
178 struct hlist_node *walk; 207 struct hlist_node *node;
179 struct hlist_head *head; 208 struct hlist_head *head;
180 struct element_t *bucket;
181 struct vis_info *info; 209 struct vis_info *info;
182 struct vis_packet *packet; 210 struct vis_packet *packet;
183 struct vis_info_entry *entries; 211 struct vis_info_entry *entries;
@@ -203,8 +231,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
203 for (i = 0; i < hash->size; i++) { 231 for (i = 0; i < hash->size; i++) {
204 head = &hash->table[i]; 232 head = &hash->table[i];
205 233
206 hlist_for_each_entry(bucket, walk, head, hlist) { 234 rcu_read_lock();
207 info = bucket->data; 235 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
208 packet = (struct vis_packet *)info->skb_packet->data; 236 packet = (struct vis_packet *)info->skb_packet->data;
209 entries = (struct vis_info_entry *) 237 entries = (struct vis_info_entry *)
210 ((char *)packet + sizeof(struct vis_packet)); 238 ((char *)packet + sizeof(struct vis_packet));
@@ -213,7 +241,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
213 if (entries[j].quality == 0) 241 if (entries[j].quality == 0)
214 continue; 242 continue;
215 compare = 243 compare =
216 compare_orig(entries[j].src, packet->vis_orig); 244 compare_eth(entries[j].src, packet->vis_orig);
217 vis_data_insert_interface(entries[j].src, 245 vis_data_insert_interface(entries[j].src,
218 &vis_if_list, 246 &vis_if_list,
219 compare); 247 compare);
@@ -223,7 +251,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
223 buf_size += 18 + 26 * packet->entries; 251 buf_size += 18 + 26 * packet->entries;
224 252
225 /* add primary/secondary records */ 253 /* add primary/secondary records */
226 if (compare_orig(entry->addr, packet->vis_orig)) 254 if (compare_eth(entry->addr, packet->vis_orig))
227 buf_size += 255 buf_size +=
228 vis_data_count_prim_sec(&vis_if_list); 256 vis_data_count_prim_sec(&vis_if_list);
229 257
@@ -236,6 +264,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
236 kfree(entry); 264 kfree(entry);
237 } 265 }
238 } 266 }
267 rcu_read_unlock();
239 } 268 }
240 269
241 buff = kmalloc(buf_size, GFP_ATOMIC); 270 buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -249,8 +278,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
249 for (i = 0; i < hash->size; i++) { 278 for (i = 0; i < hash->size; i++) {
250 head = &hash->table[i]; 279 head = &hash->table[i];
251 280
252 hlist_for_each_entry(bucket, walk, head, hlist) { 281 rcu_read_lock();
253 info = bucket->data; 282 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
254 packet = (struct vis_packet *)info->skb_packet->data; 283 packet = (struct vis_packet *)info->skb_packet->data;
255 entries = (struct vis_info_entry *) 284 entries = (struct vis_info_entry *)
256 ((char *)packet + sizeof(struct vis_packet)); 285 ((char *)packet + sizeof(struct vis_packet));
@@ -259,7 +288,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
259 if (entries[j].quality == 0) 288 if (entries[j].quality == 0)
260 continue; 289 continue;
261 compare = 290 compare =
262 compare_orig(entries[j].src, packet->vis_orig); 291 compare_eth(entries[j].src, packet->vis_orig);
263 vis_data_insert_interface(entries[j].src, 292 vis_data_insert_interface(entries[j].src,
264 &vis_if_list, 293 &vis_if_list,
265 compare); 294 compare);
@@ -277,7 +306,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
277 entry->primary); 306 entry->primary);
278 307
279 /* add primary/secondary records */ 308 /* add primary/secondary records */
280 if (compare_orig(entry->addr, packet->vis_orig)) 309 if (compare_eth(entry->addr, packet->vis_orig))
281 buff_pos += 310 buff_pos +=
282 vis_data_read_prim_sec(buff + buff_pos, 311 vis_data_read_prim_sec(buff + buff_pos,
283 &vis_if_list); 312 &vis_if_list);
@@ -291,6 +320,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
291 kfree(entry); 320 kfree(entry);
292 } 321 }
293 } 322 }
323 rcu_read_unlock();
294 } 324 }
295 325
296 spin_unlock_bh(&bat_priv->vis_hash_lock); 326 spin_unlock_bh(&bat_priv->vis_hash_lock);
@@ -345,7 +375,7 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
345 375
346 spin_lock_bh(&bat_priv->vis_list_lock); 376 spin_lock_bh(&bat_priv->vis_list_lock);
347 list_for_each_entry(entry, recv_list, list) { 377 list_for_each_entry(entry, recv_list, list) {
348 if (memcmp(entry->mac, mac, ETH_ALEN) == 0) { 378 if (compare_eth(entry->mac, mac)) {
349 spin_unlock_bh(&bat_priv->vis_list_lock); 379 spin_unlock_bh(&bat_priv->vis_list_lock);
350 return 1; 380 return 1;
351 } 381 }
@@ -381,8 +411,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
381 sizeof(struct vis_packet)); 411 sizeof(struct vis_packet));
382 412
383 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); 413 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
384 old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 414 old_info = vis_hash_find(bat_priv, &search_elem);
385 &search_elem);
386 kfree_skb(search_elem.skb_packet); 415 kfree_skb(search_elem.skb_packet);
387 416
388 if (old_info) { 417 if (old_info) {
@@ -442,7 +471,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
442 471
443 /* try to add it */ 472 /* try to add it */
444 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 473 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
445 info); 474 info, &info->hash_entry);
446 if (hash_added < 0) { 475 if (hash_added < 0) {
447 /* did not work (for some reason) */ 476 /* did not work (for some reason) */
448 kref_put(&info->refcount, free_info); 477 kref_put(&info->refcount, free_info);
@@ -529,9 +558,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
529 struct vis_info *info) 558 struct vis_info *info)
530{ 559{
531 struct hashtable_t *hash = bat_priv->orig_hash; 560 struct hashtable_t *hash = bat_priv->orig_hash;
532 struct hlist_node *walk; 561 struct hlist_node *node;
533 struct hlist_head *head; 562 struct hlist_head *head;
534 struct element_t *bucket;
535 struct orig_node *orig_node; 563 struct orig_node *orig_node;
536 struct vis_packet *packet; 564 struct vis_packet *packet;
537 int best_tq = -1, i; 565 int best_tq = -1, i;
@@ -541,16 +569,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
541 for (i = 0; i < hash->size; i++) { 569 for (i = 0; i < hash->size; i++) {
542 head = &hash->table[i]; 570 head = &hash->table[i];
543 571
544 hlist_for_each_entry(bucket, walk, head, hlist) { 572 rcu_read_lock();
545 orig_node = bucket->data; 573 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
546 if ((orig_node) && (orig_node->router) && 574 if ((orig_node) && (orig_node->router) &&
547 (orig_node->flags & VIS_SERVER) && 575 (orig_node->flags & VIS_SERVER) &&
548 (orig_node->router->tq_avg > best_tq)) { 576 (orig_node->router->tq_avg > best_tq)) {
549 best_tq = orig_node->router->tq_avg; 577 best_tq = orig_node->router->tq_avg;
550 memcpy(packet->target_orig, orig_node->orig, 578 memcpy(packet->target_orig, orig_node->orig,
551 ETH_ALEN); 579 ETH_ALEN);
552 } 580 }
553 } 581 }
582 rcu_read_unlock();
554 } 583 }
555 584
556 return best_tq; 585 return best_tq;
@@ -573,9 +602,8 @@ static bool vis_packet_full(struct vis_info *info)
573static int generate_vis_packet(struct bat_priv *bat_priv) 602static int generate_vis_packet(struct bat_priv *bat_priv)
574{ 603{
575 struct hashtable_t *hash = bat_priv->orig_hash; 604 struct hashtable_t *hash = bat_priv->orig_hash;
576 struct hlist_node *walk; 605 struct hlist_node *node;
577 struct hlist_head *head; 606 struct hlist_head *head;
578 struct element_t *bucket;
579 struct orig_node *orig_node; 607 struct orig_node *orig_node;
580 struct neigh_node *neigh_node; 608 struct neigh_node *neigh_node;
581 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; 609 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
@@ -587,7 +615,6 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
587 info->first_seen = jiffies; 615 info->first_seen = jiffies;
588 packet->vis_type = atomic_read(&bat_priv->vis_mode); 616 packet->vis_type = atomic_read(&bat_priv->vis_mode);
589 617
590 spin_lock_bh(&bat_priv->orig_hash_lock);
591 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); 618 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
592 packet->ttl = TTL; 619 packet->ttl = TTL;
593 packet->seqno = htonl(ntohl(packet->seqno) + 1); 620 packet->seqno = htonl(ntohl(packet->seqno) + 1);
@@ -597,23 +624,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
597 if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { 624 if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
598 best_tq = find_best_vis_server(bat_priv, info); 625 best_tq = find_best_vis_server(bat_priv, info);
599 626
600 if (best_tq < 0) { 627 if (best_tq < 0)
601 spin_unlock_bh(&bat_priv->orig_hash_lock);
602 return -1; 628 return -1;
603 }
604 } 629 }
605 630
606 for (i = 0; i < hash->size; i++) { 631 for (i = 0; i < hash->size; i++) {
607 head = &hash->table[i]; 632 head = &hash->table[i];
608 633
609 hlist_for_each_entry(bucket, walk, head, hlist) { 634 rcu_read_lock();
610 orig_node = bucket->data; 635 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
611 neigh_node = orig_node->router; 636 neigh_node = orig_node->router;
612 637
613 if (!neigh_node) 638 if (!neigh_node)
614 continue; 639 continue;
615 640
616 if (!compare_orig(neigh_node->addr, orig_node->orig)) 641 if (!compare_eth(neigh_node->addr, orig_node->orig))
617 continue; 642 continue;
618 643
619 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 644 if (neigh_node->if_incoming->if_status != IF_ACTIVE)
@@ -632,23 +657,19 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
632 entry->quality = neigh_node->tq_avg; 657 entry->quality = neigh_node->tq_avg;
633 packet->entries++; 658 packet->entries++;
634 659
635 if (vis_packet_full(info)) { 660 if (vis_packet_full(info))
636 spin_unlock_bh(&bat_priv->orig_hash_lock); 661 goto unlock;
637 return 0;
638 }
639 } 662 }
663 rcu_read_unlock();
640 } 664 }
641 665
642 spin_unlock_bh(&bat_priv->orig_hash_lock);
643
644 hash = bat_priv->hna_local_hash; 666 hash = bat_priv->hna_local_hash;
645 667
646 spin_lock_bh(&bat_priv->hna_lhash_lock); 668 spin_lock_bh(&bat_priv->hna_lhash_lock);
647 for (i = 0; i < hash->size; i++) { 669 for (i = 0; i < hash->size; i++) {
648 head = &hash->table[i]; 670 head = &hash->table[i];
649 671
650 hlist_for_each_entry(bucket, walk, head, hlist) { 672 hlist_for_each_entry(hna_local_entry, node, head, hash_entry) {
651 hna_local_entry = bucket->data;
652 entry = (struct vis_info_entry *) 673 entry = (struct vis_info_entry *)
653 skb_put(info->skb_packet, 674 skb_put(info->skb_packet,
654 sizeof(*entry)); 675 sizeof(*entry));
@@ -666,6 +687,10 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
666 687
667 spin_unlock_bh(&bat_priv->hna_lhash_lock); 688 spin_unlock_bh(&bat_priv->hna_lhash_lock);
668 return 0; 689 return 0;
690
691unlock:
692 rcu_read_unlock();
693 return 0;
669} 694}
670 695
671/* free old vis packets. Must be called with this vis_hash_lock 696/* free old vis packets. Must be called with this vis_hash_lock
@@ -674,25 +699,22 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
674{ 699{
675 int i; 700 int i;
676 struct hashtable_t *hash = bat_priv->vis_hash; 701 struct hashtable_t *hash = bat_priv->vis_hash;
677 struct hlist_node *walk, *safe; 702 struct hlist_node *node, *node_tmp;
678 struct hlist_head *head; 703 struct hlist_head *head;
679 struct element_t *bucket;
680 struct vis_info *info; 704 struct vis_info *info;
681 705
682 for (i = 0; i < hash->size; i++) { 706 for (i = 0; i < hash->size; i++) {
683 head = &hash->table[i]; 707 head = &hash->table[i];
684 708
685 hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) { 709 hlist_for_each_entry_safe(info, node, node_tmp,
686 info = bucket->data; 710 head, hash_entry) {
687
688 /* never purge own data. */ 711 /* never purge own data. */
689 if (info == bat_priv->my_vis_info) 712 if (info == bat_priv->my_vis_info)
690 continue; 713 continue;
691 714
692 if (time_after(jiffies, 715 if (time_after(jiffies,
693 info->first_seen + VIS_TIMEOUT * HZ)) { 716 info->first_seen + VIS_TIMEOUT * HZ)) {
694 hlist_del(walk); 717 hlist_del(node);
695 kfree(bucket);
696 send_list_del(info); 718 send_list_del(info);
697 kref_put(&info->refcount, free_info); 719 kref_put(&info->refcount, free_info);
698 } 720 }
@@ -704,27 +726,24 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
704 struct vis_info *info) 726 struct vis_info *info)
705{ 727{
706 struct hashtable_t *hash = bat_priv->orig_hash; 728 struct hashtable_t *hash = bat_priv->orig_hash;
707 struct hlist_node *walk; 729 struct hlist_node *node;
708 struct hlist_head *head; 730 struct hlist_head *head;
709 struct element_t *bucket;
710 struct orig_node *orig_node; 731 struct orig_node *orig_node;
711 struct vis_packet *packet; 732 struct vis_packet *packet;
712 struct sk_buff *skb; 733 struct sk_buff *skb;
713 struct batman_if *batman_if; 734 struct hard_iface *hard_iface;
714 uint8_t dstaddr[ETH_ALEN]; 735 uint8_t dstaddr[ETH_ALEN];
715 int i; 736 int i;
716 737
717 738
718 spin_lock_bh(&bat_priv->orig_hash_lock);
719 packet = (struct vis_packet *)info->skb_packet->data; 739 packet = (struct vis_packet *)info->skb_packet->data;
720 740
721 /* send to all routers in range. */ 741 /* send to all routers in range. */
722 for (i = 0; i < hash->size; i++) { 742 for (i = 0; i < hash->size; i++) {
723 head = &hash->table[i]; 743 head = &hash->table[i];
724 744
725 hlist_for_each_entry(bucket, walk, head, hlist) { 745 rcu_read_lock();
726 orig_node = bucket->data; 746 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
727
728 /* if it's a vis server and reachable, send it. */ 747 /* if it's a vis server and reachable, send it. */
729 if ((!orig_node) || (!orig_node->router)) 748 if ((!orig_node) || (!orig_node->router))
730 continue; 749 continue;
@@ -737,54 +756,61 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
737 continue; 756 continue;
738 757
739 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); 758 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
740 batman_if = orig_node->router->if_incoming; 759 hard_iface = orig_node->router->if_incoming;
741 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); 760 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
742 spin_unlock_bh(&bat_priv->orig_hash_lock);
743 761
744 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 762 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
745 if (skb) 763 if (skb)
746 send_skb_packet(skb, batman_if, dstaddr); 764 send_skb_packet(skb, hard_iface, dstaddr);
747 765
748 spin_lock_bh(&bat_priv->orig_hash_lock);
749 } 766 }
750 767 rcu_read_unlock();
751 } 768 }
752
753 spin_unlock_bh(&bat_priv->orig_hash_lock);
754} 769}
755 770
756static void unicast_vis_packet(struct bat_priv *bat_priv, 771static void unicast_vis_packet(struct bat_priv *bat_priv,
757 struct vis_info *info) 772 struct vis_info *info)
758{ 773{
759 struct orig_node *orig_node; 774 struct orig_node *orig_node;
775 struct neigh_node *neigh_node = NULL;
760 struct sk_buff *skb; 776 struct sk_buff *skb;
761 struct vis_packet *packet; 777 struct vis_packet *packet;
762 struct batman_if *batman_if;
763 uint8_t dstaddr[ETH_ALEN];
764 778
765 spin_lock_bh(&bat_priv->orig_hash_lock);
766 packet = (struct vis_packet *)info->skb_packet->data; 779 packet = (struct vis_packet *)info->skb_packet->data;
767 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
768 compare_orig, choose_orig,
769 packet->target_orig));
770 780
771 if ((!orig_node) || (!orig_node->router)) 781 rcu_read_lock();
772 goto out; 782 orig_node = orig_hash_find(bat_priv, packet->target_orig);
773 783
774 /* don't lock while sending the packets ... we therefore 784 if (!orig_node)
775 * copy the required data before sending */ 785 goto unlock;
776 batman_if = orig_node->router->if_incoming; 786
777 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); 787 neigh_node = orig_node->router;
778 spin_unlock_bh(&bat_priv->orig_hash_lock); 788
789 if (!neigh_node)
790 goto unlock;
791
792 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
793 neigh_node = NULL;
794 goto unlock;
795 }
796
797 rcu_read_unlock();
779 798
780 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 799 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
781 if (skb) 800 if (skb)
782 send_skb_packet(skb, batman_if, dstaddr); 801 send_skb_packet(skb, neigh_node->if_incoming,
802 neigh_node->addr);
783 803
784 return; 804 goto out;
785 805
806unlock:
807 rcu_read_unlock();
786out: 808out:
787 spin_unlock_bh(&bat_priv->orig_hash_lock); 809 if (neigh_node)
810 neigh_node_free_ref(neigh_node);
811 if (orig_node)
812 orig_node_free_ref(orig_node);
813 return;
788} 814}
789 815
790/* only send one vis packet. called from send_vis_packets() */ 816/* only send one vis packet. called from send_vis_packets() */
@@ -896,7 +922,8 @@ int vis_init(struct bat_priv *bat_priv)
896 INIT_LIST_HEAD(&bat_priv->vis_send_list); 922 INIT_LIST_HEAD(&bat_priv->vis_send_list);
897 923
898 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 924 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
899 bat_priv->my_vis_info); 925 bat_priv->my_vis_info,
926 &bat_priv->my_vis_info->hash_entry);
900 if (hash_added < 0) { 927 if (hash_added < 0) {
901 pr_err("Can't add own vis packet into hash\n"); 928 pr_err("Can't add own vis packet into hash\n");
902 /* not in hash, need to remove it manually. */ 929 /* not in hash, need to remove it manually. */
@@ -918,10 +945,11 @@ err:
918} 945}
919 946
920/* Decrease the reference count on a hash item info */ 947/* Decrease the reference count on a hash item info */
921static void free_info_ref(void *data, void *arg) 948static void free_info_ref(struct hlist_node *node, void *arg)
922{ 949{
923 struct vis_info *info = data; 950 struct vis_info *info;
924 951
952 info = container_of(node, struct vis_info, hash_entry);
925 send_list_del(info); 953 send_list_del(info);
926 kref_put(&info->refcount, free_info); 954 kref_put(&info->refcount, free_info);
927} 955}
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index c6f9c2fb4891..6ae5ec508587 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -31,9 +31,10 @@ menuconfig BT
31 to Bluetooth kernel modules are provided in the BlueZ packages. For 31 to Bluetooth kernel modules are provided in the BlueZ packages. For
32 more information, see <http://www.bluez.org/>. 32 more information, see <http://www.bluez.org/>.
33 33
34if BT != n
35
34config BT_L2CAP 36config BT_L2CAP
35 bool "L2CAP protocol support" 37 bool "L2CAP protocol support"
36 depends on BT
37 select CRC16 38 select CRC16
38 help 39 help
39 L2CAP (Logical Link Control and Adaptation Protocol) provides 40 L2CAP (Logical Link Control and Adaptation Protocol) provides
@@ -42,11 +43,12 @@ config BT_L2CAP
42 43
43config BT_SCO 44config BT_SCO
44 bool "SCO links support" 45 bool "SCO links support"
45 depends on BT
46 help 46 help
47 SCO link provides voice transport over Bluetooth. SCO support is 47 SCO link provides voice transport over Bluetooth. SCO support is
48 required for voice applications like Headset and Audio. 48 required for voice applications like Headset and Audio.
49 49
50endif
51
50source "net/bluetooth/rfcomm/Kconfig" 52source "net/bluetooth/rfcomm/Kconfig"
51 53
52source "net/bluetooth/bnep/Kconfig" 54source "net/bluetooth/bnep/Kconfig"
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 09d5c0987925..030a002ff8ee 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,10 +37,9 @@
37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) 37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
38 38
39#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 39#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
40static inline int ipv6_is_local_multicast(const struct in6_addr *addr) 40static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
41{ 41{
42 if (ipv6_addr_is_multicast(addr) && 42 if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
43 IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
44 return 1; 43 return 1;
45 return 0; 44 return 0;
46} 45}
@@ -435,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
435 eth = eth_hdr(skb); 434 eth = eth_hdr(skb);
436 435
437 memcpy(eth->h_source, br->dev->dev_addr, 6); 436 memcpy(eth->h_source, br->dev->dev_addr, 6);
438 ipv6_eth_mc_map(group, eth->h_dest);
439 eth->h_proto = htons(ETH_P_IPV6); 437 eth->h_proto = htons(ETH_P_IPV6);
440 skb_put(skb, sizeof(*eth)); 438 skb_put(skb, sizeof(*eth));
441 439
@@ -447,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
447 ip6h->payload_len = htons(8 + sizeof(*mldq)); 445 ip6h->payload_len = htons(8 + sizeof(*mldq));
448 ip6h->nexthdr = IPPROTO_HOPOPTS; 446 ip6h->nexthdr = IPPROTO_HOPOPTS;
449 ip6h->hop_limit = 1; 447 ip6h->hop_limit = 1;
450 ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); 448 ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
449 &ip6h->saddr);
451 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 450 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
451 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
452 452
453 hopopt = (u8 *)(ip6h + 1); 453 hopopt = (u8 *)(ip6h + 1);
454 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 454 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
@@ -780,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
780{ 780{
781 struct br_ip br_group; 781 struct br_ip br_group;
782 782
783 if (ipv6_is_local_multicast(group)) 783 if (!ipv6_is_transient_multicast(group))
784 return 0; 784 return 0;
785 785
786 ipv6_addr_copy(&br_group.u.ip6, group); 786 ipv6_addr_copy(&br_group.u.ip6, group);
787 br_group.proto = htons(ETH_P_IP); 787 br_group.proto = htons(ETH_P_IPV6);
788 788
789 return br_multicast_add_group(br, port, &br_group); 789 return br_multicast_add_group(br, port, &br_group);
790} 790}
@@ -1013,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1013 1013
1014 nsrcs = skb_header_pointer(skb, 1014 nsrcs = skb_header_pointer(skb,
1015 len + offsetof(struct mld2_grec, 1015 len + offsetof(struct mld2_grec,
1016 grec_mca), 1016 grec_nsrcs),
1017 sizeof(_nsrcs), &_nsrcs); 1017 sizeof(_nsrcs), &_nsrcs);
1018 if (!nsrcs) 1018 if (!nsrcs)
1019 return -EINVAL; 1019 return -EINVAL;
1020 1020
1021 if (!pskb_may_pull(skb, 1021 if (!pskb_may_pull(skb,
1022 len + sizeof(*grec) + 1022 len + sizeof(*grec) +
1023 sizeof(struct in6_addr) * (*nsrcs))) 1023 sizeof(struct in6_addr) * ntohs(*nsrcs)))
1024 return -EINVAL; 1024 return -EINVAL;
1025 1025
1026 grec = (struct mld2_grec *)(skb->data + len); 1026 grec = (struct mld2_grec *)(skb->data + len);
1027 len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); 1027 len += sizeof(*grec) +
1028 sizeof(struct in6_addr) * ntohs(*nsrcs);
1028 1029
1029 /* We treat these as MLDv1 reports for now. */ 1030 /* We treat these as MLDv1 reports for now. */
1030 switch (grec->grec_type) { 1031 switch (grec->grec_type) {
@@ -1340,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1340{ 1341{
1341 struct br_ip br_group; 1342 struct br_ip br_group;
1342 1343
1343 if (ipv6_is_local_multicast(group)) 1344 if (!ipv6_is_transient_multicast(group))
1344 return; 1345 return;
1345 1346
1346 ipv6_addr_copy(&br_group.u.ip6, group); 1347 ipv6_addr_copy(&br_group.u.ip6, group);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 4b5b66d07bba..45b57b173f70 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -428,14 +428,15 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
428 if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) 428 if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
429 goto free_skb; 429 goto free_skb;
430 430
431 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 431 rt = ip_route_output_key(dev_net(dev), &fl);
432 if (!IS_ERR(rt)) {
432 /* - Bridged-and-DNAT'ed traffic doesn't 433 /* - Bridged-and-DNAT'ed traffic doesn't
433 * require ip_forwarding. */ 434 * require ip_forwarding. */
434 if (((struct dst_entry *)rt)->dev == dev) { 435 if (rt->dst.dev == dev) {
435 skb_dst_set(skb, (struct dst_entry *)rt); 436 skb_dst_set(skb, &rt->dst);
436 goto bridged_dnat; 437 goto bridged_dnat;
437 } 438 }
438 dst_release((struct dst_entry *)rt); 439 ip_rt_put(rt);
439 } 440 }
440free_skb: 441free_skb:
441 kfree_skb(skb); 442 kfree_skb(skb);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5f1825df9dca..893669caa8de 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user,
1107 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 1107 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1108 return -ENOMEM; 1108 return -ENOMEM;
1109 1109
1110 tmp.name[sizeof(tmp.name) - 1] = 0;
1111
1110 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1112 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1111 newinfo = vmalloc(sizeof(*newinfo) + countersize); 1113 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1112 if (!newinfo) 1114 if (!newinfo)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dff633d62e5b..35b36b86d762 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
252{ 252{
253 struct kvec iov = {buf, len}; 253 struct kvec iov = {buf, len};
254 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 254 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
255 int r;
255 256
256 return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); 257 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
258 if (r == -EAGAIN)
259 r = 0;
260 return r;
257} 261}
258 262
259/* 263/*
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
264 size_t kvlen, size_t len, int more) 268 size_t kvlen, size_t len, int more)
265{ 269{
266 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 270 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
271 int r;
267 272
268 if (more) 273 if (more)
269 msg.msg_flags |= MSG_MORE; 274 msg.msg_flags |= MSG_MORE;
270 else 275 else
271 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ 276 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
272 277
273 return kernel_sendmsg(sock, &msg, iov, kvlen, len); 278 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
279 if (r == -EAGAIN)
280 r = 0;
281 return r;
274} 282}
275 283
276 284
@@ -847,6 +855,8 @@ static int write_partial_msg_pages(struct ceph_connection *con)
847 (msg->pages || msg->pagelist || msg->bio || in_trail)) 855 (msg->pages || msg->pagelist || msg->bio || in_trail))
848 kunmap(page); 856 kunmap(page);
849 857
858 if (ret == -EAGAIN)
859 ret = 0;
850 if (ret <= 0) 860 if (ret <= 0)
851 goto out; 861 goto out;
852 862
@@ -1737,16 +1747,12 @@ more_kvec:
1737 if (con->out_skip) { 1747 if (con->out_skip) {
1738 ret = write_partial_skip(con); 1748 ret = write_partial_skip(con);
1739 if (ret <= 0) 1749 if (ret <= 0)
1740 goto done; 1750 goto out;
1741 if (ret < 0) {
1742 dout("try_write write_partial_skip err %d\n", ret);
1743 goto done;
1744 }
1745 } 1751 }
1746 if (con->out_kvec_left) { 1752 if (con->out_kvec_left) {
1747 ret = write_partial_kvec(con); 1753 ret = write_partial_kvec(con);
1748 if (ret <= 0) 1754 if (ret <= 0)
1749 goto done; 1755 goto out;
1750 } 1756 }
1751 1757
1752 /* msg pages? */ 1758 /* msg pages? */
@@ -1761,11 +1767,11 @@ more_kvec:
1761 if (ret == 1) 1767 if (ret == 1)
1762 goto more_kvec; /* we need to send the footer, too! */ 1768 goto more_kvec; /* we need to send the footer, too! */
1763 if (ret == 0) 1769 if (ret == 0)
1764 goto done; 1770 goto out;
1765 if (ret < 0) { 1771 if (ret < 0) {
1766 dout("try_write write_partial_msg_pages err %d\n", 1772 dout("try_write write_partial_msg_pages err %d\n",
1767 ret); 1773 ret);
1768 goto done; 1774 goto out;
1769 } 1775 }
1770 } 1776 }
1771 1777
@@ -1789,10 +1795,9 @@ do_next:
1789 /* Nothing to do! */ 1795 /* Nothing to do! */
1790 clear_bit(WRITE_PENDING, &con->state); 1796 clear_bit(WRITE_PENDING, &con->state);
1791 dout("try_write nothing else to write.\n"); 1797 dout("try_write nothing else to write.\n");
1792done:
1793 ret = 0; 1798 ret = 0;
1794out: 1799out:
1795 dout("try_write done on %p\n", con); 1800 dout("try_write done on %p ret %d\n", con, ret);
1796 return ret; 1801 return ret;
1797} 1802}
1798 1803
@@ -1821,19 +1826,17 @@ more:
1821 dout("try_read connecting\n"); 1826 dout("try_read connecting\n");
1822 ret = read_partial_banner(con); 1827 ret = read_partial_banner(con);
1823 if (ret <= 0) 1828 if (ret <= 0)
1824 goto done;
1825 if (process_banner(con) < 0) {
1826 ret = -1;
1827 goto out; 1829 goto out;
1828 } 1830 ret = process_banner(con);
1831 if (ret < 0)
1832 goto out;
1829 } 1833 }
1830 ret = read_partial_connect(con); 1834 ret = read_partial_connect(con);
1831 if (ret <= 0) 1835 if (ret <= 0)
1832 goto done;
1833 if (process_connect(con) < 0) {
1834 ret = -1;
1835 goto out; 1836 goto out;
1836 } 1837 ret = process_connect(con);
1838 if (ret < 0)
1839 goto out;
1837 goto more; 1840 goto more;
1838 } 1841 }
1839 1842
@@ -1848,7 +1851,7 @@ more:
1848 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 1851 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
1849 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 1852 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
1850 if (ret <= 0) 1853 if (ret <= 0)
1851 goto done; 1854 goto out;
1852 con->in_base_pos += ret; 1855 con->in_base_pos += ret;
1853 if (con->in_base_pos) 1856 if (con->in_base_pos)
1854 goto more; 1857 goto more;
@@ -1859,7 +1862,7 @@ more:
1859 */ 1862 */
1860 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 1863 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
1861 if (ret <= 0) 1864 if (ret <= 0)
1862 goto done; 1865 goto out;
1863 dout("try_read got tag %d\n", (int)con->in_tag); 1866 dout("try_read got tag %d\n", (int)con->in_tag);
1864 switch (con->in_tag) { 1867 switch (con->in_tag) {
1865 case CEPH_MSGR_TAG_MSG: 1868 case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1873,7 @@ more:
1870 break; 1873 break;
1871 case CEPH_MSGR_TAG_CLOSE: 1874 case CEPH_MSGR_TAG_CLOSE:
1872 set_bit(CLOSED, &con->state); /* fixme */ 1875 set_bit(CLOSED, &con->state); /* fixme */
1873 goto done; 1876 goto out;
1874 default: 1877 default:
1875 goto bad_tag; 1878 goto bad_tag;
1876 } 1879 }
@@ -1882,13 +1885,12 @@ more:
1882 case -EBADMSG: 1885 case -EBADMSG:
1883 con->error_msg = "bad crc"; 1886 con->error_msg = "bad crc";
1884 ret = -EIO; 1887 ret = -EIO;
1885 goto out; 1888 break;
1886 case -EIO: 1889 case -EIO:
1887 con->error_msg = "io error"; 1890 con->error_msg = "io error";
1888 goto out; 1891 break;
1889 default:
1890 goto done;
1891 } 1892 }
1893 goto out;
1892 } 1894 }
1893 if (con->in_tag == CEPH_MSGR_TAG_READY) 1895 if (con->in_tag == CEPH_MSGR_TAG_READY)
1894 goto more; 1896 goto more;
@@ -1898,15 +1900,13 @@ more:
1898 if (con->in_tag == CEPH_MSGR_TAG_ACK) { 1900 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
1899 ret = read_partial_ack(con); 1901 ret = read_partial_ack(con);
1900 if (ret <= 0) 1902 if (ret <= 0)
1901 goto done; 1903 goto out;
1902 process_ack(con); 1904 process_ack(con);
1903 goto more; 1905 goto more;
1904 } 1906 }
1905 1907
1906done:
1907 ret = 0;
1908out: 1908out:
1909 dout("try_read done on %p\n", con); 1909 dout("try_read done on %p ret %d\n", con, ret);
1910 return ret; 1910 return ret;
1911 1911
1912bad_tag: 1912bad_tag:
diff --git a/net/core/dev.c b/net/core/dev.c
index 69a3c0817d6f..9f66de9c0572 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3096,63 +3096,31 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3096} 3096}
3097EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3097EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3098 3098
3099static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, 3099static void vlan_on_bond_hook(struct sk_buff *skb)
3100 struct net_device *master)
3101{ 3100{
3102 if (skb->pkt_type == PACKET_HOST) { 3101 /*
3103 u16 *dest = (u16 *) eth_hdr(skb)->h_dest; 3102 * Make sure ARP frames received on VLAN interfaces stacked on
3103 * bonding interfaces still make their way to any base bonding
3104 * device that may have registered for a specific ptype.
3105 */
3106 if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
3107 vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
3108 skb->protocol == htons(ETH_P_ARP)) {
3109 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3104 3110
3105 memcpy(dest, master->dev_addr, ETH_ALEN); 3111 if (!skb2)
3112 return;
3113 skb2->dev = vlan_dev_real_dev(skb->dev);
3114 netif_rx(skb2);
3106 } 3115 }
3107} 3116}
3108 3117
3109/* On bonding slaves other than the currently active slave, suppress
3110 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
3111 * ARP on active-backup slaves with arp_validate enabled.
3112 */
3113static int __skb_bond_should_drop(struct sk_buff *skb,
3114 struct net_device *master)
3115{
3116 struct net_device *dev = skb->dev;
3117
3118 if (master->priv_flags & IFF_MASTER_ARPMON)
3119 dev->last_rx = jiffies;
3120
3121 if ((master->priv_flags & IFF_MASTER_ALB) &&
3122 (master->priv_flags & IFF_BRIDGE_PORT)) {
3123 /* Do address unmangle. The local destination address
3124 * will be always the one master has. Provides the right
3125 * functionality in a bridge.
3126 */
3127 skb_bond_set_mac_by_master(skb, master);
3128 }
3129
3130 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
3131 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
3132 skb->protocol == __cpu_to_be16(ETH_P_ARP))
3133 return 0;
3134
3135 if (master->priv_flags & IFF_MASTER_ALB) {
3136 if (skb->pkt_type != PACKET_BROADCAST &&
3137 skb->pkt_type != PACKET_MULTICAST)
3138 return 0;
3139 }
3140 if (master->priv_flags & IFF_MASTER_8023AD &&
3141 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
3142 return 0;
3143
3144 return 1;
3145 }
3146 return 0;
3147}
3148
3149static int __netif_receive_skb(struct sk_buff *skb) 3118static int __netif_receive_skb(struct sk_buff *skb)
3150{ 3119{
3151 struct packet_type *ptype, *pt_prev; 3120 struct packet_type *ptype, *pt_prev;
3152 rx_handler_func_t *rx_handler; 3121 rx_handler_func_t *rx_handler;
3153 struct net_device *orig_dev; 3122 struct net_device *orig_dev;
3154 struct net_device *null_or_orig; 3123 struct net_device *null_or_dev;
3155 struct net_device *orig_or_bond;
3156 int ret = NET_RX_DROP; 3124 int ret = NET_RX_DROP;
3157 __be16 type; 3125 __be16 type;
3158 3126
@@ -3167,32 +3135,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
3167 3135
3168 if (!skb->skb_iif) 3136 if (!skb->skb_iif)
3169 skb->skb_iif = skb->dev->ifindex; 3137 skb->skb_iif = skb->dev->ifindex;
3170
3171 /*
3172 * bonding note: skbs received on inactive slaves should only
3173 * be delivered to pkt handlers that are exact matches. Also
3174 * the deliver_no_wcard flag will be set. If packet handlers
3175 * are sensitive to duplicate packets these skbs will need to
3176 * be dropped at the handler.
3177 */
3178 null_or_orig = NULL;
3179 orig_dev = skb->dev; 3138 orig_dev = skb->dev;
3180 if (skb->deliver_no_wcard)
3181 null_or_orig = orig_dev;
3182 else if (netif_is_bond_slave(orig_dev)) {
3183 struct net_device *bond_master = ACCESS_ONCE(orig_dev->master);
3184
3185 if (likely(bond_master)) {
3186 if (__skb_bond_should_drop(skb, bond_master)) {
3187 skb->deliver_no_wcard = 1;
3188 /* deliver only exact match */
3189 null_or_orig = orig_dev;
3190 } else
3191 skb->dev = bond_master;
3192 }
3193 }
3194 3139
3195 __this_cpu_inc(softnet_data.processed);
3196 skb_reset_network_header(skb); 3140 skb_reset_network_header(skb);
3197 skb_reset_transport_header(skb); 3141 skb_reset_transport_header(skb);
3198 skb->mac_len = skb->network_header - skb->mac_header; 3142 skb->mac_len = skb->network_header - skb->mac_header;
@@ -3201,6 +3145,10 @@ static int __netif_receive_skb(struct sk_buff *skb)
3201 3145
3202 rcu_read_lock(); 3146 rcu_read_lock();
3203 3147
3148another_round:
3149
3150 __this_cpu_inc(softnet_data.processed);
3151
3204#ifdef CONFIG_NET_CLS_ACT 3152#ifdef CONFIG_NET_CLS_ACT
3205 if (skb->tc_verd & TC_NCLS) { 3153 if (skb->tc_verd & TC_NCLS) {
3206 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3154 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3209,8 +3157,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
3209#endif 3157#endif
3210 3158
3211 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3159 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3212 if (ptype->dev == null_or_orig || ptype->dev == skb->dev || 3160 if (!ptype->dev || ptype->dev == skb->dev) {
3213 ptype->dev == orig_dev) {
3214 if (pt_prev) 3161 if (pt_prev)
3215 ret = deliver_skb(skb, pt_prev, orig_dev); 3162 ret = deliver_skb(skb, pt_prev, orig_dev);
3216 pt_prev = ptype; 3163 pt_prev = ptype;
@@ -3224,16 +3171,20 @@ static int __netif_receive_skb(struct sk_buff *skb)
3224ncls: 3171ncls:
3225#endif 3172#endif
3226 3173
3227 /* Handle special case of bridge or macvlan */
3228 rx_handler = rcu_dereference(skb->dev->rx_handler); 3174 rx_handler = rcu_dereference(skb->dev->rx_handler);
3229 if (rx_handler) { 3175 if (rx_handler) {
3176 struct net_device *prev_dev;
3177
3230 if (pt_prev) { 3178 if (pt_prev) {
3231 ret = deliver_skb(skb, pt_prev, orig_dev); 3179 ret = deliver_skb(skb, pt_prev, orig_dev);
3232 pt_prev = NULL; 3180 pt_prev = NULL;
3233 } 3181 }
3182 prev_dev = skb->dev;
3234 skb = rx_handler(skb); 3183 skb = rx_handler(skb);
3235 if (!skb) 3184 if (!skb)
3236 goto out; 3185 goto out;
3186 if (skb->dev != prev_dev)
3187 goto another_round;
3237 } 3188 }
3238 3189
3239 if (vlan_tx_tag_present(skb)) { 3190 if (vlan_tx_tag_present(skb)) {
@@ -3248,24 +3199,17 @@ ncls:
3248 goto out; 3199 goto out;
3249 } 3200 }
3250 3201
3251 /* 3202 vlan_on_bond_hook(skb);
3252 * Make sure frames received on VLAN interfaces stacked on 3203
3253 * bonding interfaces still make their way to any base bonding 3204 /* deliver only exact match when indicated */
3254 * device that may have registered for a specific ptype. The 3205 null_or_dev = skb->deliver_no_wcard ? skb->dev : NULL;
3255 * handler may have to adjust skb->dev and orig_dev.
3256 */
3257 orig_or_bond = orig_dev;
3258 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
3259 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
3260 orig_or_bond = vlan_dev_real_dev(skb->dev);
3261 }
3262 3206
3263 type = skb->protocol; 3207 type = skb->protocol;
3264 list_for_each_entry_rcu(ptype, 3208 list_for_each_entry_rcu(ptype,
3265 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 3209 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3266 if (ptype->type == type && (ptype->dev == null_or_orig || 3210 if (ptype->type == type &&
3267 ptype->dev == skb->dev || ptype->dev == orig_dev || 3211 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3268 ptype->dev == orig_or_bond)) { 3212 ptype->dev == orig_dev)) {
3269 if (pt_prev) 3213 if (pt_prev)
3270 ret = deliver_skb(skb, pt_prev, orig_dev); 3214 ret = deliver_skb(skb, pt_prev, orig_dev);
3271 pt_prev = ptype; 3215 pt_prev = ptype;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c18992f..133fd22ea287 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
144 144
145 list_for_each_entry(ha, &from_list->list, list) { 145 list_for_each_entry(ha, &from_list->list, list) {
146 type = addr_type ? addr_type : ha->type; 146 type = addr_type ? addr_type : ha->type;
147 __hw_addr_del(to_list, ha->addr, addr_len, addr_type); 147 __hw_addr_del(to_list, ha->addr, addr_len, type);
148 } 148 }
149} 149}
150EXPORT_SYMBOL(__hw_addr_del_multiple); 150EXPORT_SYMBOL(__hw_addr_del_multiple);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 14cf560b4a3e..1eb526a848ff 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2434,8 +2434,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2434 return -ENOMEM; 2434 return -ENOMEM;
2435 2435
2436 /* initialize the next frag */ 2436 /* initialize the next frag */
2437 sk->sk_sndmsg_page = page;
2438 sk->sk_sndmsg_off = 0;
2439 skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 2437 skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2440 skb->truesize += PAGE_SIZE; 2438 skb->truesize += PAGE_SIZE;
2441 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 2439 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
@@ -2455,7 +2453,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2455 return -EFAULT; 2453 return -EFAULT;
2456 2454
2457 /* copy was successful so update the size parameters */ 2455 /* copy was successful so update the size parameters */
2458 sk->sk_sndmsg_off += copy;
2459 frag->size += copy; 2456 frag->size += copy;
2460 skb->len += copy; 2457 skb->len += copy;
2461 skb->data_len += copy; 2458 skb->data_len += copy;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d5074a567289..118392f3872e 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1193 goto err; 1193 goto err;
1194 } 1194 }
1195 1195
1196 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { 1196 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1198 err = ops->ieee_setpfc(netdev, pfc); 1198 err = ops->ieee_setpfc(netdev, pfc);
1199 if (err) 1199 if (err)
@@ -1224,6 +1224,59 @@ err:
1224 return err; 1224 return err;
1225} 1225}
1226 1226
1227static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
1228 int app_nested_type, int app_info_type,
1229 int app_entry_type)
1230{
1231 struct dcb_peer_app_info info;
1232 struct dcb_app *table = NULL;
1233 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1234 u16 app_count;
1235 int err;
1236
1237
1238 /**
1239 * retrieve the peer app configuration form the driver. If the driver
1240 * handlers fail exit without doing anything
1241 */
1242 err = ops->peer_getappinfo(netdev, &info, &app_count);
1243 if (!err && app_count) {
1244 table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
1245 if (!table)
1246 return -ENOMEM;
1247
1248 err = ops->peer_getapptable(netdev, table);
1249 }
1250
1251 if (!err) {
1252 u16 i;
1253 struct nlattr *app;
1254
1255 /**
1256 * build the message, from here on the only possible failure
1257 * is due to the skb size
1258 */
1259 err = -EMSGSIZE;
1260
1261 app = nla_nest_start(skb, app_nested_type);
1262 if (!app)
1263 goto nla_put_failure;
1264
1265 if (app_info_type)
1266 NLA_PUT(skb, app_info_type, sizeof(info), &info);
1267
1268 for (i = 0; i < app_count; i++)
1269 NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
1270 &table[i]);
1271
1272 nla_nest_end(skb, app);
1273 }
1274 err = 0;
1275
1276nla_put_failure:
1277 kfree(table);
1278 return err;
1279}
1227 1280
1228/* Handle IEEE 802.1Qaz GET commands. */ 1281/* Handle IEEE 802.1Qaz GET commands. */
1229static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb, 1282static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
@@ -1288,6 +1341,30 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
1288 spin_unlock(&dcb_lock); 1341 spin_unlock(&dcb_lock);
1289 nla_nest_end(skb, app); 1342 nla_nest_end(skb, app);
1290 1343
1344 /* get peer info if available */
1345 if (ops->ieee_peer_getets) {
1346 struct ieee_ets ets;
1347 err = ops->ieee_peer_getets(netdev, &ets);
1348 if (!err)
1349 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
1350 }
1351
1352 if (ops->ieee_peer_getpfc) {
1353 struct ieee_pfc pfc;
1354 err = ops->ieee_peer_getpfc(netdev, &pfc);
1355 if (!err)
1356 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
1357 }
1358
1359 if (ops->peer_getappinfo && ops->peer_getapptable) {
1360 err = dcbnl_build_peer_app(netdev, skb,
1361 DCB_ATTR_IEEE_PEER_APP,
1362 DCB_ATTR_IEEE_APP_UNSPEC,
1363 DCB_ATTR_IEEE_APP);
1364 if (err)
1365 goto nla_put_failure;
1366 }
1367
1291 nla_nest_end(skb, ieee); 1368 nla_nest_end(skb, ieee);
1292 nlmsg_end(skb, nlh); 1369 nlmsg_end(skb, nlh);
1293 1370
@@ -1441,6 +1518,71 @@ err:
1441 return ret; 1518 return ret;
1442} 1519}
1443 1520
1521/* Handle CEE DCBX GET commands. */
1522static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
1523 u32 pid, u32 seq, u16 flags)
1524{
1525 struct sk_buff *skb;
1526 struct nlmsghdr *nlh;
1527 struct dcbmsg *dcb;
1528 struct nlattr *cee;
1529 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1530 int err;
1531
1532 if (!ops)
1533 return -EOPNOTSUPP;
1534
1535 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1536 if (!skb)
1537 return -ENOBUFS;
1538
1539 nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1540
1541 dcb = NLMSG_DATA(nlh);
1542 dcb->dcb_family = AF_UNSPEC;
1543 dcb->cmd = DCB_CMD_CEE_GET;
1544
1545 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
1546
1547 cee = nla_nest_start(skb, DCB_ATTR_CEE);
1548 if (!cee)
1549 goto nla_put_failure;
1550
1551 /* get peer info if available */
1552 if (ops->cee_peer_getpg) {
1553 struct cee_pg pg;
1554 err = ops->cee_peer_getpg(netdev, &pg);
1555 if (!err)
1556 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
1557 }
1558
1559 if (ops->cee_peer_getpfc) {
1560 struct cee_pfc pfc;
1561 err = ops->cee_peer_getpfc(netdev, &pfc);
1562 if (!err)
1563 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
1564 }
1565
1566 if (ops->peer_getappinfo && ops->peer_getapptable) {
1567 err = dcbnl_build_peer_app(netdev, skb,
1568 DCB_ATTR_CEE_PEER_APP_TABLE,
1569 DCB_ATTR_CEE_PEER_APP_INFO,
1570 DCB_ATTR_CEE_PEER_APP);
1571 if (err)
1572 goto nla_put_failure;
1573 }
1574
1575 nla_nest_end(skb, cee);
1576 nlmsg_end(skb, nlh);
1577
1578 return rtnl_unicast(skb, &init_net, pid);
1579nla_put_failure:
1580 nlmsg_cancel(skb, nlh);
1581nlmsg_failure:
1582 kfree_skb(skb);
1583 return -1;
1584}
1585
1444static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1586static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1445{ 1587{
1446 struct net *net = sock_net(skb->sk); 1588 struct net *net = sock_net(skb->sk);
@@ -1570,6 +1712,10 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1570 ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq, 1712 ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
1571 nlh->nlmsg_flags); 1713 nlh->nlmsg_flags);
1572 goto out; 1714 goto out;
1715 case DCB_CMD_CEE_GET:
1716 ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
1717 nlh->nlmsg_flags);
1718 goto out;
1573 default: 1719 default:
1574 goto errout; 1720 goto errout;
1575 } 1721 }
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8cde009e8b85..4222e7a654b0 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
614 /* Caller (dccp_v4_do_rcv) will send Reset */ 614 /* Caller (dccp_v4_do_rcv) will send Reset */
615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
616 return 1; 616 return 1;
617 } else if (sk->sk_state == DCCP_CLOSED) {
618 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
619 return 1;
617 } 620 }
618 621
619 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { 622 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
668 } 671 }
669 672
670 switch (sk->sk_state) { 673 switch (sk->sk_state) {
671 case DCCP_CLOSED:
672 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
673 return 1;
674
675 case DCCP_REQUESTING: 674 case DCCP_REQUESTING:
676 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); 675 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
677 if (queued >= 0) 676 if (queued >= 0)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 937989199c80..7882377bc62e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -46,7 +46,6 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
46 __be16 orig_sport, orig_dport; 46 __be16 orig_sport, orig_dport;
47 struct rtable *rt; 47 struct rtable *rt;
48 __be32 daddr, nexthop; 48 __be32 daddr, nexthop;
49 int tmp;
50 int err; 49 int err;
51 50
52 dp->dccps_role = DCCP_ROLE_CLIENT; 51 dp->dccps_role = DCCP_ROLE_CLIENT;
@@ -66,12 +65,12 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
66 65
67 orig_sport = inet->inet_sport; 66 orig_sport = inet->inet_sport;
68 orig_dport = usin->sin_port; 67 orig_dport = usin->sin_port;
69 tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr, 68 rt = ip_route_connect(nexthop, inet->inet_saddr,
70 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 69 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
71 IPPROTO_DCCP, 70 IPPROTO_DCCP,
72 orig_sport, orig_dport, sk, 1); 71 orig_sport, orig_dport, sk, true);
73 if (tmp < 0) 72 if (IS_ERR(rt))
74 return tmp; 73 return PTR_ERR(rt);
75 74
76 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 75 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
77 ip_rt_put(rt); 76 ip_rt_put(rt);
@@ -102,12 +101,13 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
102 if (err != 0) 101 if (err != 0)
103 goto failure; 102 goto failure;
104 103
105 err = ip_route_newports(&rt, IPPROTO_DCCP, 104 rt = ip_route_newports(rt, IPPROTO_DCCP,
106 orig_sport, orig_dport, 105 orig_sport, orig_dport,
107 inet->inet_sport, inet->inet_dport, sk); 106 inet->inet_sport, inet->inet_dport, sk);
108 if (err != 0) 107 if (IS_ERR(rt)) {
108 rt = NULL;
109 goto failure; 109 goto failure;
110 110 }
111 /* OK, now commit destination to socket. */ 111 /* OK, now commit destination to socket. */
112 sk_setup_caps(sk, &rt->dst); 112 sk_setup_caps(sk, &rt->dst);
113 113
@@ -475,7 +475,8 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
475 }; 475 };
476 476
477 security_skb_classify_flow(skb, &fl); 477 security_skb_classify_flow(skb, &fl);
478 if (ip_route_output_flow(net, &rt, &fl, sk, 0)) { 478 rt = ip_route_output_flow(net, &fl, sk);
479 if (IS_ERR(rt)) {
479 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 480 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
480 return NULL; 481 return NULL;
481 } 482 }
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 460d545a6509..5efc57f5e605 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -162,15 +162,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
162 fl.fl_ip_sport = inet->inet_sport; 162 fl.fl_ip_sport = inet->inet_sport;
163 security_sk_classify_flow(sk, &fl); 163 security_sk_classify_flow(sk, &fl);
164 164
165 err = ip6_dst_lookup(sk, &dst, &fl); 165 dst = ip6_dst_lookup_flow(sk, &fl, NULL, false);
166 if (err) { 166 if (IS_ERR(dst)) {
167 sk->sk_err_soft = -err; 167 sk->sk_err_soft = -PTR_ERR(dst);
168 goto out;
169 }
170
171 err = xfrm_lookup(net, &dst, &fl, sk, 0);
172 if (err < 0) {
173 sk->sk_err_soft = -err;
174 goto out; 168 goto out;
175 } 169 }
176 } else 170 } else
@@ -267,16 +261,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
267 261
268 final_p = fl6_update_dst(&fl, opt, &final); 262 final_p = fl6_update_dst(&fl, opt, &final);
269 263
270 err = ip6_dst_lookup(sk, &dst, &fl); 264 dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
271 if (err) 265 if (IS_ERR(dst)) {
272 goto done; 266 err = PTR_ERR(dst);
273 267 dst = NULL;
274 if (final_p)
275 ipv6_addr_copy(&fl.fl6_dst, final_p);
276
277 err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
278 if (err < 0)
279 goto done; 268 goto done;
269 }
280 270
281 skb = dccp_make_response(sk, dst, req); 271 skb = dccp_make_response(sk, dst, req);
282 if (skb != NULL) { 272 if (skb != NULL) {
@@ -338,14 +328,13 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
338 security_skb_classify_flow(rxskb, &fl); 328 security_skb_classify_flow(rxskb, &fl);
339 329
340 /* sk = NULL, but it is safe for now. RST socket required. */ 330 /* sk = NULL, but it is safe for now. RST socket required. */
341 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { 331 dst = ip6_dst_lookup_flow(ctl_sk, &fl, NULL, false);
342 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { 332 if (!IS_ERR(dst)) {
343 skb_dst_set(skb, dst); 333 skb_dst_set(skb, dst);
344 ip6_xmit(ctl_sk, skb, &fl, NULL); 334 ip6_xmit(ctl_sk, skb, &fl, NULL);
345 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 335 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
346 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 336 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
347 return; 337 return;
348 }
349 } 338 }
350 339
351 kfree_skb(skb); 340 kfree_skb(skb);
@@ -550,13 +539,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
550 fl.fl_ip_sport = inet_rsk(req)->loc_port; 539 fl.fl_ip_sport = inet_rsk(req)->loc_port;
551 security_sk_classify_flow(sk, &fl); 540 security_sk_classify_flow(sk, &fl);
552 541
553 if (ip6_dst_lookup(sk, &dst, &fl)) 542 dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
554 goto out; 543 if (IS_ERR(dst))
555
556 if (final_p)
557 ipv6_addr_copy(&fl.fl6_dst, final_p);
558
559 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
560 goto out; 544 goto out;
561 } 545 }
562 546
@@ -979,19 +963,10 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
979 963
980 final_p = fl6_update_dst(&fl, np->opt, &final); 964 final_p = fl6_update_dst(&fl, np->opt, &final);
981 965
982 err = ip6_dst_lookup(sk, &dst, &fl); 966 dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
983 if (err) 967 if (IS_ERR(dst)) {
968 err = PTR_ERR(dst);
984 goto failure; 969 goto failure;
985
986 if (final_p)
987 ipv6_addr_copy(&fl.fl6_dst, final_p);
988
989 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
990 if (err < 0) {
991 if (err == -EREMOTE)
992 err = ip6_dst_blackhole(sk, &dst, &fl);
993 if (err < 0)
994 goto failure;
995 } 970 }
996 971
997 if (saddr == NULL) { 972 if (saddr == NULL) {
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 06c054d5ccba..484fdbf92bd8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1222,7 +1222,11 @@ static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int f
1222 1222
1223 err = __dn_route_output_key(pprt, flp, flags); 1223 err = __dn_route_output_key(pprt, flp, flags);
1224 if (err == 0 && flp->proto) { 1224 if (err == 0 && flp->proto) {
1225 err = xfrm_lookup(&init_net, pprt, flp, NULL, 0); 1225 *pprt = xfrm_lookup(&init_net, *pprt, flp, NULL, 0);
1226 if (IS_ERR(*pprt)) {
1227 err = PTR_ERR(*pprt);
1228 *pprt = NULL;
1229 }
1226 } 1230 }
1227 return err; 1231 return err;
1228} 1232}
@@ -1233,8 +1237,13 @@ int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock
1233 1237
1234 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); 1238 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
1235 if (err == 0 && fl->proto) { 1239 if (err == 0 && fl->proto) {
1236 err = xfrm_lookup(&init_net, pprt, fl, sk, 1240 if (!(flags & MSG_DONTWAIT))
1237 (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT); 1241 fl->flags |= FLOWI_FLAG_CAN_SLEEP;
1242 *pprt = xfrm_lookup(&init_net, *pprt, fl, sk, 0);
1243 if (IS_ERR(*pprt)) {
1244 err = PTR_ERR(*pprt);
1245 *pprt = NULL;
1246 }
1238 } 1247 }
1239 return err; 1248 return err;
1240} 1249}
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 739435a6af39..cfa7a5e1c5c9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
67 size_t result_len = 0; 67 size_t result_len = 0;
68 const char *data = _data, *end, *opt; 68 const char *data = _data, *end, *opt;
69 69
70 kenter("%%%d,%s,'%s',%zu", 70 kenter("%%%d,%s,'%*.*s',%zu",
71 key->serial, key->description, data, datalen); 71 key->serial, key->description,
72 (int)datalen, (int)datalen, data, datalen);
72 73
73 if (datalen <= 1 || !data || data[datalen - 1] != '\0') 74 if (datalen <= 1 || !data || data[datalen - 1] != '\0')
74 return -EINVAL; 75 return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
217 seq_printf(m, ": %u", key->datalen); 218 seq_printf(m, ": %u", key->datalen);
218} 219}
219 220
221/*
222 * read the DNS data
223 * - the key's semaphore is read-locked
224 */
225static long dns_resolver_read(const struct key *key,
226 char __user *buffer, size_t buflen)
227{
228 if (key->type_data.x[0])
229 return key->type_data.x[0];
230
231 return user_read(key, buffer, buflen);
232}
233
220struct key_type key_type_dns_resolver = { 234struct key_type key_type_dns_resolver = {
221 .name = "dns_resolver", 235 .name = "dns_resolver",
222 .instantiate = dns_resolver_instantiate, 236 .instantiate = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
224 .revoke = user_revoke, 238 .revoke = user_revoke,
225 .destroy = user_destroy, 239 .destroy = user_destroy,
226 .describe = dns_resolver_describe, 240 .describe = dns_resolver_describe,
227 .read = user_read, 241 .read = dns_resolver_read,
228}; 242};
229 243
230static int __init init_dns_resolver(void) 244static int __init init_dns_resolver(void)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7ceb80447631..35a502055018 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1101,23 +1101,20 @@ int sysctl_ip_dynaddr __read_mostly;
1101static int inet_sk_reselect_saddr(struct sock *sk) 1101static int inet_sk_reselect_saddr(struct sock *sk)
1102{ 1102{
1103 struct inet_sock *inet = inet_sk(sk); 1103 struct inet_sock *inet = inet_sk(sk);
1104 int err;
1105 struct rtable *rt;
1106 __be32 old_saddr = inet->inet_saddr; 1104 __be32 old_saddr = inet->inet_saddr;
1107 __be32 new_saddr;
1108 __be32 daddr = inet->inet_daddr; 1105 __be32 daddr = inet->inet_daddr;
1106 struct rtable *rt;
1107 __be32 new_saddr;
1109 1108
1110 if (inet->opt && inet->opt->srr) 1109 if (inet->opt && inet->opt->srr)
1111 daddr = inet->opt->faddr; 1110 daddr = inet->opt->faddr;
1112 1111
1113 /* Query new route. */ 1112 /* Query new route. */
1114 err = ip_route_connect(&rt, daddr, 0, 1113 rt = ip_route_connect(daddr, 0, RT_CONN_FLAGS(sk),
1115 RT_CONN_FLAGS(sk), 1114 sk->sk_bound_dev_if, sk->sk_protocol,
1116 sk->sk_bound_dev_if, 1115 inet->inet_sport, inet->inet_dport, sk, false);
1117 sk->sk_protocol, 1116 if (IS_ERR(rt))
1118 inet->inet_sport, inet->inet_dport, sk, 0); 1117 return PTR_ERR(rt);
1119 if (err)
1120 return err;
1121 1118
1122 sk_setup_caps(sk, &rt->dst); 1119 sk_setup_caps(sk, &rt->dst);
1123 1120
@@ -1160,7 +1157,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1160 daddr = inet->inet_daddr; 1157 daddr = inet->inet_daddr;
1161 if (inet->opt && inet->opt->srr) 1158 if (inet->opt && inet->opt->srr)
1162 daddr = inet->opt->faddr; 1159 daddr = inet->opt->faddr;
1163{ 1160 {
1164 struct flowi fl = { 1161 struct flowi fl = {
1165 .oif = sk->sk_bound_dev_if, 1162 .oif = sk->sk_bound_dev_if,
1166 .mark = sk->sk_mark, 1163 .mark = sk->sk_mark,
@@ -1174,11 +1171,14 @@ int inet_sk_rebuild_header(struct sock *sk)
1174 }; 1171 };
1175 1172
1176 security_sk_classify_flow(sk, &fl); 1173 security_sk_classify_flow(sk, &fl);
1177 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0); 1174 rt = ip_route_output_flow(sock_net(sk), &fl, sk);
1178} 1175 }
1179 if (!err) 1176 if (!IS_ERR(rt)) {
1177 err = 0;
1180 sk_setup_caps(sk, &rt->dst); 1178 sk_setup_caps(sk, &rt->dst);
1181 else { 1179 } else {
1180 err = PTR_ERR(rt);
1181
1182 /* Routing failed... */ 1182 /* Routing failed... */
1183 sk->sk_route_caps = 0; 1183 sk->sk_route_caps = 0;
1184 /* 1184 /*
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7927589813b5..fa9988da1da4 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -440,7 +440,8 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
440 /*unsigned long now; */ 440 /*unsigned long now; */
441 struct net *net = dev_net(dev); 441 struct net *net = dev_net(dev);
442 442
443 if (ip_route_output_key(net, &rt, &fl) < 0) 443 rt = ip_route_output_key(net, &fl);
444 if (IS_ERR(rt))
444 return 1; 445 return 1;
445 if (rt->dst.dev != dev) { 446 if (rt->dst.dev != dev) {
446 NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER); 447 NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
@@ -1063,10 +1064,10 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1063 if (dev == NULL) { 1064 if (dev == NULL) {
1064 struct flowi fl = { .fl4_dst = ip, 1065 struct flowi fl = { .fl4_dst = ip,
1065 .fl4_tos = RTO_ONLINK }; 1066 .fl4_tos = RTO_ONLINK };
1066 struct rtable *rt; 1067 struct rtable *rt = ip_route_output_key(net, &fl);
1067 err = ip_route_output_key(net, &rt, &fl); 1068
1068 if (err != 0) 1069 if (IS_ERR(rt))
1069 return err; 1070 return PTR_ERR(rt);
1070 dev = rt->dst.dev; 1071 dev = rt->dst.dev;
1071 ip_rt_put(rt); 1072 ip_rt_put(rt);
1072 if (!dev) 1073 if (!dev)
@@ -1177,7 +1178,6 @@ static int arp_req_delete_public(struct net *net, struct arpreq *r,
1177static int arp_req_delete(struct net *net, struct arpreq *r, 1178static int arp_req_delete(struct net *net, struct arpreq *r,
1178 struct net_device *dev) 1179 struct net_device *dev)
1179{ 1180{
1180 int err;
1181 __be32 ip; 1181 __be32 ip;
1182 1182
1183 if (r->arp_flags & ATF_PUBL) 1183 if (r->arp_flags & ATF_PUBL)
@@ -1187,10 +1187,9 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1187 if (dev == NULL) { 1187 if (dev == NULL) {
1188 struct flowi fl = { .fl4_dst = ip, 1188 struct flowi fl = { .fl4_dst = ip,
1189 .fl4_tos = RTO_ONLINK }; 1189 .fl4_tos = RTO_ONLINK };
1190 struct rtable *rt; 1190 struct rtable *rt = ip_route_output_key(net, &fl);
1191 err = ip_route_output_key(net, &rt, &fl); 1191 if (IS_ERR(rt))
1192 if (err != 0) 1192 return PTR_ERR(rt);
1193 return err;
1194 dev = rt->dst.dev; 1193 dev = rt->dst.dev;
1195 ip_rt_put(rt); 1194 ip_rt_put(rt);
1196 if (!dev) 1195 if (!dev)
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 174be6caa5c8..85bd24ca4f6d 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -46,11 +46,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
46 if (!saddr) 46 if (!saddr)
47 saddr = inet->mc_addr; 47 saddr = inet->mc_addr;
48 } 48 }
49 err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr, 49 rt = ip_route_connect(usin->sin_addr.s_addr, saddr,
50 RT_CONN_FLAGS(sk), oif, 50 RT_CONN_FLAGS(sk), oif,
51 sk->sk_protocol, 51 sk->sk_protocol,
52 inet->inet_sport, usin->sin_port, sk, 1); 52 inet->inet_sport, usin->sin_port, sk, true);
53 if (err) { 53 if (IS_ERR(rt)) {
54 err = PTR_ERR(rt);
54 if (err == -ENETUNREACH) 55 if (err == -ENETUNREACH)
55 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 56 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
56 return err; 57 return err;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 90389281d97a..ff53860d1e56 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -111,7 +111,7 @@ static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
111 111
112static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa) 112static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
113{ 113{
114 unsigned int hash = inet_addr_hash(net, ifa->ifa_address); 114 unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
115 115
116 spin_lock(&inet_addr_hash_lock); 116 spin_lock(&inet_addr_hash_lock);
117 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]); 117 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
@@ -146,7 +146,7 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
146 146
147 if (!net_eq(dev_net(dev), net)) 147 if (!net_eq(dev_net(dev), net))
148 continue; 148 continue;
149 if (ifa->ifa_address == addr) { 149 if (ifa->ifa_local == addr) {
150 result = dev; 150 result = dev;
151 break; 151 break;
152 } 152 }
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index ad0778a3fa53..1d2233cd99e6 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -890,10 +890,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
890#ifdef CONFIG_IP_ROUTE_MULTIPATH 890#ifdef CONFIG_IP_ROUTE_MULTIPATH
891 fib_sync_up(dev); 891 fib_sync_up(dev);
892#endif 892#endif
893 fib_update_nh_saddrs(dev);
893 rt_cache_flush(dev_net(dev), -1); 894 rt_cache_flush(dev_net(dev), -1);
894 break; 895 break;
895 case NETDEV_DOWN: 896 case NETDEV_DOWN:
896 fib_del_ifaddr(ifa); 897 fib_del_ifaddr(ifa);
898 fib_update_nh_saddrs(dev);
897 if (ifa->ifa_dev->ifa_list == NULL) { 899 if (ifa->ifa_dev->ifa_list == NULL) {
898 /* Last address was deleted from this interface. 900 /* Last address was deleted from this interface.
899 * Disable IP. 901 * Disable IP.
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index d5c40d8f6632..84db2da5c848 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -51,4 +51,11 @@ static inline void fib_result_assign(struct fib_result *res,
51 res->fi = fi; 51 res->fi = fi;
52} 52}
53 53
54struct fib_prop {
55 int error;
56 u8 scope;
57};
58
59extern const struct fib_prop fib_props[RTN_MAX + 1];
60
54#endif /* _FIB_LOOKUP_H */ 61#endif /* _FIB_LOOKUP_H */
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 562f34cd9303..d73d7581b51f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -90,11 +90,7 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
90#define endfor_nexthops(fi) } 90#define endfor_nexthops(fi) }
91 91
92 92
93static const struct 93const struct fib_prop fib_props[RTN_MAX + 1] = {
94{
95 int error;
96 u8 scope;
97} fib_props[RTN_MAX + 1] = {
98 [RTN_UNSPEC] = { 94 [RTN_UNSPEC] = {
99 .error = 0, 95 .error = 0,
100 .scope = RT_SCOPE_NOWHERE, 96 .scope = RT_SCOPE_NOWHERE,
@@ -707,6 +703,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
707 int nhs = 1; 703 int nhs = 1;
708 struct net *net = cfg->fc_nlinfo.nl_net; 704 struct net *net = cfg->fc_nlinfo.nl_net;
709 705
706 if (cfg->fc_type > RTN_MAX)
707 goto err_inval;
708
710 /* Fast check to catch the most weird cases */ 709 /* Fast check to catch the most weird cases */
711 if (fib_props[cfg->fc_type].scope > cfg->fc_scope) 710 if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
712 goto err_inval; 711 goto err_inval;
@@ -812,6 +811,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
812 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) 811 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
813 goto err_inval; 812 goto err_inval;
814 goto link_it; 813 goto link_it;
814 } else {
815 switch (cfg->fc_type) {
816 case RTN_UNICAST:
817 case RTN_LOCAL:
818 case RTN_BROADCAST:
819 case RTN_ANYCAST:
820 case RTN_MULTICAST:
821 break;
822 default:
823 goto err_inval;
824 }
815 } 825 }
816 826
817 if (cfg->fc_scope > RT_SCOPE_HOST) 827 if (cfg->fc_scope > RT_SCOPE_HOST)
@@ -843,6 +853,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
843 goto err_inval; 853 goto err_inval;
844 } 854 }
845 855
856 change_nexthops(fi) {
857 nexthop_nh->nh_cfg_scope = cfg->fc_scope;
858 nexthop_nh->nh_saddr = inet_select_addr(nexthop_nh->nh_dev,
859 nexthop_nh->nh_gw,
860 nexthop_nh->nh_cfg_scope);
861 } endfor_nexthops(fi)
862
846link_it: 863link_it:
847 ofi = fib_find_info(fi); 864 ofi = fib_find_info(fi);
848 if (ofi) { 865 if (ofi) {
@@ -888,87 +905,6 @@ failure:
888 return ERR_PTR(err); 905 return ERR_PTR(err);
889} 906}
890 907
891/* Note! fib_semantic_match intentionally uses RCU list functions. */
892int fib_semantic_match(struct fib_table *tb, struct list_head *head,
893 const struct flowi *flp, struct fib_result *res,
894 int prefixlen, int fib_flags)
895{
896 struct fib_alias *fa;
897 int nh_sel = 0;
898
899 list_for_each_entry_rcu(fa, head, fa_list) {
900 int err;
901
902 if (fa->fa_tos &&
903 fa->fa_tos != flp->fl4_tos)
904 continue;
905
906 if (fa->fa_scope < flp->fl4_scope)
907 continue;
908
909 fib_alias_accessed(fa);
910
911 err = fib_props[fa->fa_type].error;
912 if (err == 0) {
913 struct fib_info *fi = fa->fa_info;
914
915 if (fi->fib_flags & RTNH_F_DEAD)
916 continue;
917
918 switch (fa->fa_type) {
919 case RTN_UNICAST:
920 case RTN_LOCAL:
921 case RTN_BROADCAST:
922 case RTN_ANYCAST:
923 case RTN_MULTICAST:
924 for_nexthops(fi) {
925 if (nh->nh_flags & RTNH_F_DEAD)
926 continue;
927 if (!flp->oif || flp->oif == nh->nh_oif)
928 break;
929 }
930#ifdef CONFIG_IP_ROUTE_MULTIPATH
931 if (nhsel < fi->fib_nhs) {
932 nh_sel = nhsel;
933 goto out_fill_res;
934 }
935#else
936 if (nhsel < 1)
937 goto out_fill_res;
938#endif
939 endfor_nexthops(fi);
940 continue;
941
942 default:
943 pr_warning("fib_semantic_match bad type %#x\n",
944 fa->fa_type);
945 return -EINVAL;
946 }
947 }
948 return err;
949 }
950 return 1;
951
952out_fill_res:
953 res->prefixlen = prefixlen;
954 res->nh_sel = nh_sel;
955 res->type = fa->fa_type;
956 res->scope = fa->fa_scope;
957 res->fi = fa->fa_info;
958 res->table = tb;
959 res->fa_head = head;
960 if (!(fib_flags & FIB_LOOKUP_NOREF))
961 atomic_inc(&res->fi->fib_clntref);
962 return 0;
963}
964
965/* Find appropriate source address to this destination */
966
967__be32 __fib_res_prefsrc(struct fib_result *res)
968{
969 return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
970}
971
972int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 908int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
973 u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos, 909 u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
974 struct fib_info *fi, unsigned int flags) 910 struct fib_info *fi, unsigned int flags)
@@ -1192,6 +1128,24 @@ out:
1192 return; 1128 return;
1193} 1129}
1194 1130
1131void fib_update_nh_saddrs(struct net_device *dev)
1132{
1133 struct hlist_head *head;
1134 struct hlist_node *node;
1135 struct fib_nh *nh;
1136 unsigned int hash;
1137
1138 hash = fib_devindex_hashfn(dev->ifindex);
1139 head = &fib_info_devhash[hash];
1140 hlist_for_each_entry(nh, node, head, nh_hash) {
1141 if (nh->nh_dev != dev)
1142 continue;
1143 nh->nh_saddr = inet_select_addr(nh->nh_dev,
1144 nh->nh_gw,
1145 nh->nh_cfg_scope);
1146 }
1147}
1148
1195#ifdef CONFIG_IP_ROUTE_MULTIPATH 1149#ifdef CONFIG_IP_ROUTE_MULTIPATH
1196 1150
1197/* 1151/*
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index edf3b0997e01..a4109a544778 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1349,23 +1349,58 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
1349 struct hlist_node *node; 1349 struct hlist_node *node;
1350 1350
1351 hlist_for_each_entry_rcu(li, node, hhead, hlist) { 1351 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1352 int err; 1352 struct fib_alias *fa;
1353 int plen = li->plen; 1353 int plen = li->plen;
1354 __be32 mask = inet_make_mask(plen); 1354 __be32 mask = inet_make_mask(plen);
1355 1355
1356 if (l->key != (key & ntohl(mask))) 1356 if (l->key != (key & ntohl(mask)))
1357 continue; 1357 continue;
1358 1358
1359 err = fib_semantic_match(tb, &li->falh, flp, res, plen, fib_flags); 1359 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1360 struct fib_info *fi = fa->fa_info;
1361 int nhsel, err;
1360 1362
1363 if (fa->fa_tos && fa->fa_tos != flp->fl4_tos)
1364 continue;
1365 if (fa->fa_scope < flp->fl4_scope)
1366 continue;
1367 fib_alias_accessed(fa);
1368 err = fib_props[fa->fa_type].error;
1369 if (err) {
1361#ifdef CONFIG_IP_FIB_TRIE_STATS 1370#ifdef CONFIG_IP_FIB_TRIE_STATS
1362 if (err <= 0) 1371 t->stats.semantic_match_miss++;
1363 t->stats.semantic_match_passed++; 1372#endif
1364 else 1373 return 1;
1365 t->stats.semantic_match_miss++; 1374 }
1375 if (fi->fib_flags & RTNH_F_DEAD)
1376 continue;
1377 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1378 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1379
1380 if (nh->nh_flags & RTNH_F_DEAD)
1381 continue;
1382 if (flp->oif && flp->oif != nh->nh_oif)
1383 continue;
1384
1385#ifdef CONFIG_IP_FIB_TRIE_STATS
1386 t->stats.semantic_match_passed++;
1387#endif
1388 res->prefixlen = plen;
1389 res->nh_sel = nhsel;
1390 res->type = fa->fa_type;
1391 res->scope = fa->fa_scope;
1392 res->fi = fi;
1393 res->table = tb;
1394 res->fa_head = &li->falh;
1395 if (!(fib_flags & FIB_LOOKUP_NOREF))
1396 atomic_inc(&res->fi->fib_clntref);
1397 return 0;
1398 }
1399 }
1400
1401#ifdef CONFIG_IP_FIB_TRIE_STATS
1402 t->stats.semantic_match_miss++;
1366#endif 1403#endif
1367 if (err <= 0)
1368 return err;
1369 } 1404 }
1370 1405
1371 return 1; 1406 return 1;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ad2bcf1b69ae..1771ce662548 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -358,7 +358,8 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
358 .fl4_tos = RT_TOS(ip_hdr(skb)->tos), 358 .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
359 .proto = IPPROTO_ICMP }; 359 .proto = IPPROTO_ICMP };
360 security_skb_classify_flow(skb, &fl); 360 security_skb_classify_flow(skb, &fl);
361 if (ip_route_output_key(net, &rt, &fl)) 361 rt = ip_route_output_key(net, &fl);
362 if (IS_ERR(rt))
362 goto out_unlock; 363 goto out_unlock;
363 } 364 }
364 if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type, 365 if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
@@ -369,6 +370,94 @@ out_unlock:
369 icmp_xmit_unlock(sk); 370 icmp_xmit_unlock(sk);
370} 371}
371 372
373static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
374 struct iphdr *iph,
375 __be32 saddr, u8 tos,
376 int type, int code,
377 struct icmp_bxm *param)
378{
379 struct flowi fl = {
380 .fl4_dst = (param->replyopts.srr ?
381 param->replyopts.faddr : iph->saddr),
382 .fl4_src = saddr,
383 .fl4_tos = RT_TOS(tos),
384 .proto = IPPROTO_ICMP,
385 .fl_icmp_type = type,
386 .fl_icmp_code = code,
387 };
388 struct rtable *rt, *rt2;
389 int err;
390
391 security_skb_classify_flow(skb_in, &fl);
392 rt = __ip_route_output_key(net, &fl);
393 if (IS_ERR(rt))
394 return rt;
395
396 /* No need to clone since we're just using its address. */
397 rt2 = rt;
398
399 if (!fl.fl4_src)
400 fl.fl4_src = rt->rt_src;
401
402 rt = (struct rtable *) xfrm_lookup(net, &rt->dst, &fl, NULL, 0);
403 if (!IS_ERR(rt)) {
404 if (rt != rt2)
405 return rt;
406 } else if (PTR_ERR(rt) == -EPERM) {
407 rt = NULL;
408 } else
409 return rt;
410
411 err = xfrm_decode_session_reverse(skb_in, &fl, AF_INET);
412 if (err)
413 goto relookup_failed;
414
415 if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL) {
416 rt2 = __ip_route_output_key(net, &fl);
417 if (IS_ERR(rt2))
418 err = PTR_ERR(rt2);
419 } else {
420 struct flowi fl2 = {};
421 unsigned long orefdst;
422
423 fl2.fl4_dst = fl.fl4_src;
424 rt2 = ip_route_output_key(net, &fl2);
425 if (IS_ERR(rt2)) {
426 err = PTR_ERR(rt2);
427 goto relookup_failed;
428 }
429 /* Ugh! */
430 orefdst = skb_in->_skb_refdst; /* save old refdst */
431 err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
432 RT_TOS(tos), rt2->dst.dev);
433
434 dst_release(&rt2->dst);
435 rt2 = skb_rtable(skb_in);
436 skb_in->_skb_refdst = orefdst; /* restore old refdst */
437 }
438
439 if (err)
440 goto relookup_failed;
441
442 rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst, &fl, NULL, XFRM_LOOKUP_ICMP);
443 if (!IS_ERR(rt2)) {
444 dst_release(&rt->dst);
445 rt = rt2;
446 } else if (PTR_ERR(rt2) == -EPERM) {
447 if (rt)
448 dst_release(&rt->dst);
449 return rt2;
450 } else {
451 err = PTR_ERR(rt2);
452 goto relookup_failed;
453 }
454 return rt;
455
456relookup_failed:
457 if (rt)
458 return rt;
459 return ERR_PTR(err);
460}
372 461
373/* 462/*
374 * Send an ICMP message in response to a situation 463 * Send an ICMP message in response to a situation
@@ -474,7 +563,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
474 rcu_read_lock(); 563 rcu_read_lock();
475 if (rt_is_input_route(rt) && 564 if (rt_is_input_route(rt) &&
476 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) 565 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
477 dev = dev_get_by_index_rcu(net, rt->fl.iif); 566 dev = dev_get_by_index_rcu(net, rt->rt_iif);
478 567
479 if (dev) 568 if (dev)
480 saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); 569 saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -506,86 +595,11 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
506 ipc.opt = &icmp_param.replyopts; 595 ipc.opt = &icmp_param.replyopts;
507 ipc.tx_flags = 0; 596 ipc.tx_flags = 0;
508 597
509 { 598 rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
510 struct flowi fl = { 599 type, code, &icmp_param);
511 .fl4_dst = icmp_param.replyopts.srr ? 600 if (IS_ERR(rt))
512 icmp_param.replyopts.faddr : iph->saddr, 601 goto out_unlock;
513 .fl4_src = saddr,
514 .fl4_tos = RT_TOS(tos),
515 .proto = IPPROTO_ICMP,
516 .fl_icmp_type = type,
517 .fl_icmp_code = code,
518 };
519 int err;
520 struct rtable *rt2;
521
522 security_skb_classify_flow(skb_in, &fl);
523 if (__ip_route_output_key(net, &rt, &fl))
524 goto out_unlock;
525
526 /* No need to clone since we're just using its address. */
527 rt2 = rt;
528
529 if (!fl.nl_u.ip4_u.saddr)
530 fl.nl_u.ip4_u.saddr = rt->rt_src;
531
532 err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
533 switch (err) {
534 case 0:
535 if (rt != rt2)
536 goto route_done;
537 break;
538 case -EPERM:
539 rt = NULL;
540 break;
541 default:
542 goto out_unlock;
543 }
544
545 if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
546 goto relookup_failed;
547
548 if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
549 err = __ip_route_output_key(net, &rt2, &fl);
550 else {
551 struct flowi fl2 = {};
552 unsigned long orefdst;
553
554 fl2.fl4_dst = fl.fl4_src;
555 if (ip_route_output_key(net, &rt2, &fl2))
556 goto relookup_failed;
557
558 /* Ugh! */
559 orefdst = skb_in->_skb_refdst; /* save old refdst */
560 err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
561 RT_TOS(tos), rt2->dst.dev);
562
563 dst_release(&rt2->dst);
564 rt2 = skb_rtable(skb_in);
565 skb_in->_skb_refdst = orefdst; /* restore old refdst */
566 }
567
568 if (err)
569 goto relookup_failed;
570
571 err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
572 XFRM_LOOKUP_ICMP);
573 switch (err) {
574 case 0:
575 dst_release(&rt->dst);
576 rt = rt2;
577 break;
578 case -EPERM:
579 goto ende;
580 default:
581relookup_failed:
582 if (!rt)
583 goto out_unlock;
584 break;
585 }
586 }
587 602
588route_done:
589 if (!icmpv4_xrlim_allow(net, rt, type, code)) 603 if (!icmpv4_xrlim_allow(net, rt, type, code))
590 goto ende; 604 goto ende;
591 605
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index e0e77e297de3..44ba9068b72f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -325,7 +325,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
325 struct flowi fl = { .oif = dev->ifindex, 325 struct flowi fl = { .oif = dev->ifindex,
326 .fl4_dst = IGMPV3_ALL_MCR, 326 .fl4_dst = IGMPV3_ALL_MCR,
327 .proto = IPPROTO_IGMP }; 327 .proto = IPPROTO_IGMP };
328 if (ip_route_output_key(net, &rt, &fl)) { 328 rt = ip_route_output_key(net, &fl);
329 if (IS_ERR(rt)) {
329 kfree_skb(skb); 330 kfree_skb(skb);
330 return NULL; 331 return NULL;
331 } 332 }
@@ -670,7 +671,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
670 struct flowi fl = { .oif = dev->ifindex, 671 struct flowi fl = { .oif = dev->ifindex,
671 .fl4_dst = dst, 672 .fl4_dst = dst,
672 .proto = IPPROTO_IGMP }; 673 .proto = IPPROTO_IGMP };
673 if (ip_route_output_key(net, &rt, &fl)) 674 rt = ip_route_output_key(net, &fl);
675 if (IS_ERR(rt))
674 return -1; 676 return -1;
675 } 677 }
676 if (rt->rt_src == 0) { 678 if (rt->rt_src == 0) {
@@ -1440,7 +1442,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1440static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) 1442static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
1441{ 1443{
1442 struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr }; 1444 struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
1443 struct rtable *rt;
1444 struct net_device *dev = NULL; 1445 struct net_device *dev = NULL;
1445 struct in_device *idev = NULL; 1446 struct in_device *idev = NULL;
1446 1447
@@ -1454,9 +1455,12 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
1454 return NULL; 1455 return NULL;
1455 } 1456 }
1456 1457
1457 if (!dev && !ip_route_output_key(net, &rt, &fl)) { 1458 if (!dev) {
1458 dev = rt->dst.dev; 1459 struct rtable *rt = ip_route_output_key(net, &fl);
1459 ip_rt_put(rt); 1460 if (!IS_ERR(rt)) {
1461 dev = rt->dst.dev;
1462 ip_rt_put(rt);
1463 }
1460 } 1464 }
1461 if (dev) { 1465 if (dev) {
1462 imr->imr_ifindex = dev->ifindex; 1466 imr->imr_ifindex = dev->ifindex;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 97e5fb765265..e4e301a61c5b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -369,7 +369,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
369 struct net *net = sock_net(sk); 369 struct net *net = sock_net(sk);
370 370
371 security_req_classify_flow(req, &fl); 371 security_req_classify_flow(req, &fl);
372 if (ip_route_output_flow(net, &rt, &fl, sk, 0)) 372 rt = ip_route_output_flow(net, &fl, sk);
373 if (IS_ERR(rt))
373 goto no_route; 374 goto no_route;
374 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 375 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
375 goto route_err; 376 goto route_err;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 48f8d4592ccd..f604ffdbea27 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -81,19 +81,19 @@ static const struct inet_peer peer_fake_node = {
81 81
82struct inet_peer_base { 82struct inet_peer_base {
83 struct inet_peer __rcu *root; 83 struct inet_peer __rcu *root;
84 spinlock_t lock; 84 seqlock_t lock;
85 int total; 85 int total;
86}; 86};
87 87
88static struct inet_peer_base v4_peers = { 88static struct inet_peer_base v4_peers = {
89 .root = peer_avl_empty_rcu, 89 .root = peer_avl_empty_rcu,
90 .lock = __SPIN_LOCK_UNLOCKED(v4_peers.lock), 90 .lock = __SEQLOCK_UNLOCKED(v4_peers.lock),
91 .total = 0, 91 .total = 0,
92}; 92};
93 93
94static struct inet_peer_base v6_peers = { 94static struct inet_peer_base v6_peers = {
95 .root = peer_avl_empty_rcu, 95 .root = peer_avl_empty_rcu,
96 .lock = __SPIN_LOCK_UNLOCKED(v6_peers.lock), 96 .lock = __SEQLOCK_UNLOCKED(v6_peers.lock),
97 .total = 0, 97 .total = 0,
98}; 98};
99 99
@@ -177,6 +177,9 @@ static int addr_compare(const struct inetpeer_addr *a,
177 return 0; 177 return 0;
178} 178}
179 179
180#define rcu_deref_locked(X, BASE) \
181 rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
182
180/* 183/*
181 * Called with local BH disabled and the pool lock held. 184 * Called with local BH disabled and the pool lock held.
182 */ 185 */
@@ -187,8 +190,7 @@ static int addr_compare(const struct inetpeer_addr *a,
187 \ 190 \
188 stackptr = _stack; \ 191 stackptr = _stack; \
189 *stackptr++ = &_base->root; \ 192 *stackptr++ = &_base->root; \
190 for (u = rcu_dereference_protected(_base->root, \ 193 for (u = rcu_deref_locked(_base->root, _base); \
191 lockdep_is_held(&_base->lock)); \
192 u != peer_avl_empty; ) { \ 194 u != peer_avl_empty; ) { \
193 int cmp = addr_compare(_daddr, &u->daddr); \ 195 int cmp = addr_compare(_daddr, &u->daddr); \
194 if (cmp == 0) \ 196 if (cmp == 0) \
@@ -198,8 +200,7 @@ static int addr_compare(const struct inetpeer_addr *a,
198 else \ 200 else \
199 v = &u->avl_right; \ 201 v = &u->avl_right; \
200 *stackptr++ = v; \ 202 *stackptr++ = v; \
201 u = rcu_dereference_protected(*v, \ 203 u = rcu_deref_locked(*v, _base); \
202 lockdep_is_held(&_base->lock)); \
203 } \ 204 } \
204 u; \ 205 u; \
205}) 206})
@@ -246,13 +247,11 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
246 struct inet_peer __rcu **v; \ 247 struct inet_peer __rcu **v; \
247 *stackptr++ = &start->avl_left; \ 248 *stackptr++ = &start->avl_left; \
248 v = &start->avl_left; \ 249 v = &start->avl_left; \
249 for (u = rcu_dereference_protected(*v, \ 250 for (u = rcu_deref_locked(*v, base); \
250 lockdep_is_held(&base->lock)); \
251 u->avl_right != peer_avl_empty_rcu; ) { \ 251 u->avl_right != peer_avl_empty_rcu; ) { \
252 v = &u->avl_right; \ 252 v = &u->avl_right; \
253 *stackptr++ = v; \ 253 *stackptr++ = v; \
254 u = rcu_dereference_protected(*v, \ 254 u = rcu_deref_locked(*v, base); \
255 lockdep_is_held(&base->lock)); \
256 } \ 255 } \
257 u; \ 256 u; \
258}) 257})
@@ -271,21 +270,16 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
271 270
272 while (stackend > stack) { 271 while (stackend > stack) {
273 nodep = *--stackend; 272 nodep = *--stackend;
274 node = rcu_dereference_protected(*nodep, 273 node = rcu_deref_locked(*nodep, base);
275 lockdep_is_held(&base->lock)); 274 l = rcu_deref_locked(node->avl_left, base);
276 l = rcu_dereference_protected(node->avl_left, 275 r = rcu_deref_locked(node->avl_right, base);
277 lockdep_is_held(&base->lock));
278 r = rcu_dereference_protected(node->avl_right,
279 lockdep_is_held(&base->lock));
280 lh = node_height(l); 276 lh = node_height(l);
281 rh = node_height(r); 277 rh = node_height(r);
282 if (lh > rh + 1) { /* l: RH+2 */ 278 if (lh > rh + 1) { /* l: RH+2 */
283 struct inet_peer *ll, *lr, *lrl, *lrr; 279 struct inet_peer *ll, *lr, *lrl, *lrr;
284 int lrh; 280 int lrh;
285 ll = rcu_dereference_protected(l->avl_left, 281 ll = rcu_deref_locked(l->avl_left, base);
286 lockdep_is_held(&base->lock)); 282 lr = rcu_deref_locked(l->avl_right, base);
287 lr = rcu_dereference_protected(l->avl_right,
288 lockdep_is_held(&base->lock));
289 lrh = node_height(lr); 283 lrh = node_height(lr);
290 if (lrh <= node_height(ll)) { /* ll: RH+1 */ 284 if (lrh <= node_height(ll)) { /* ll: RH+1 */
291 RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */ 285 RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
@@ -296,10 +290,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
296 l->avl_height = node->avl_height + 1; 290 l->avl_height = node->avl_height + 1;
297 RCU_INIT_POINTER(*nodep, l); 291 RCU_INIT_POINTER(*nodep, l);
298 } else { /* ll: RH, lr: RH+1 */ 292 } else { /* ll: RH, lr: RH+1 */
299 lrl = rcu_dereference_protected(lr->avl_left, 293 lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
300 lockdep_is_held(&base->lock)); /* lrl: RH or RH-1 */ 294 lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
301 lrr = rcu_dereference_protected(lr->avl_right,
302 lockdep_is_held(&base->lock)); /* lrr: RH or RH-1 */
303 RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */ 295 RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
304 RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ 296 RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
305 node->avl_height = rh + 1; /* node: RH+1 */ 297 node->avl_height = rh + 1; /* node: RH+1 */
@@ -314,10 +306,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
314 } else if (rh > lh + 1) { /* r: LH+2 */ 306 } else if (rh > lh + 1) { /* r: LH+2 */
315 struct inet_peer *rr, *rl, *rlr, *rll; 307 struct inet_peer *rr, *rl, *rlr, *rll;
316 int rlh; 308 int rlh;
317 rr = rcu_dereference_protected(r->avl_right, 309 rr = rcu_deref_locked(r->avl_right, base);
318 lockdep_is_held(&base->lock)); 310 rl = rcu_deref_locked(r->avl_left, base);
319 rl = rcu_dereference_protected(r->avl_left,
320 lockdep_is_held(&base->lock));
321 rlh = node_height(rl); 311 rlh = node_height(rl);
322 if (rlh <= node_height(rr)) { /* rr: LH+1 */ 312 if (rlh <= node_height(rr)) { /* rr: LH+1 */
323 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ 313 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
@@ -328,10 +318,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
328 r->avl_height = node->avl_height + 1; 318 r->avl_height = node->avl_height + 1;
329 RCU_INIT_POINTER(*nodep, r); 319 RCU_INIT_POINTER(*nodep, r);
330 } else { /* rr: RH, rl: RH+1 */ 320 } else { /* rr: RH, rl: RH+1 */
331 rlr = rcu_dereference_protected(rl->avl_right, 321 rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
332 lockdep_is_held(&base->lock)); /* rlr: LH or LH-1 */ 322 rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
333 rll = rcu_dereference_protected(rl->avl_left,
334 lockdep_is_held(&base->lock)); /* rll: LH or LH-1 */
335 RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */ 323 RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
336 RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ 324 RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
337 node->avl_height = lh + 1; /* node: LH+1 */ 325 node->avl_height = lh + 1; /* node: LH+1 */
@@ -372,7 +360,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
372 360
373 do_free = 0; 361 do_free = 0;
374 362
375 spin_lock_bh(&base->lock); 363 write_seqlock_bh(&base->lock);
376 /* Check the reference counter. It was artificially incremented by 1 364 /* Check the reference counter. It was artificially incremented by 1
377 * in cleanup() function to prevent sudden disappearing. If we can 365 * in cleanup() function to prevent sudden disappearing. If we can
378 * atomically (because of lockless readers) take this last reference, 366 * atomically (because of lockless readers) take this last reference,
@@ -392,8 +380,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
392 /* look for a node to insert instead of p */ 380 /* look for a node to insert instead of p */
393 struct inet_peer *t; 381 struct inet_peer *t;
394 t = lookup_rightempty(p, base); 382 t = lookup_rightempty(p, base);
395 BUG_ON(rcu_dereference_protected(*stackptr[-1], 383 BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
396 lockdep_is_held(&base->lock)) != t);
397 **--stackptr = t->avl_left; 384 **--stackptr = t->avl_left;
398 /* t is removed, t->daddr > x->daddr for any 385 /* t is removed, t->daddr > x->daddr for any
399 * x in p->avl_left subtree. 386 * x in p->avl_left subtree.
@@ -409,7 +396,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
409 base->total--; 396 base->total--;
410 do_free = 1; 397 do_free = 1;
411 } 398 }
412 spin_unlock_bh(&base->lock); 399 write_sequnlock_bh(&base->lock);
413 400
414 if (do_free) 401 if (do_free)
415 call_rcu_bh(&p->rcu, inetpeer_free_rcu); 402 call_rcu_bh(&p->rcu, inetpeer_free_rcu);
@@ -477,12 +464,16 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 464 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
478 struct inet_peer_base *base = family_to_base(daddr->family); 465 struct inet_peer_base *base = family_to_base(daddr->family);
479 struct inet_peer *p; 466 struct inet_peer *p;
467 unsigned int sequence;
468 int invalidated;
480 469
481 /* Look up for the address quickly, lockless. 470 /* Look up for the address quickly, lockless.
482 * Because of a concurrent writer, we might not find an existing entry. 471 * Because of a concurrent writer, we might not find an existing entry.
483 */ 472 */
484 rcu_read_lock_bh(); 473 rcu_read_lock_bh();
474 sequence = read_seqbegin(&base->lock);
485 p = lookup_rcu_bh(daddr, base); 475 p = lookup_rcu_bh(daddr, base);
476 invalidated = read_seqretry(&base->lock, sequence);
486 rcu_read_unlock_bh(); 477 rcu_read_unlock_bh();
487 478
488 if (p) { 479 if (p) {
@@ -493,14 +484,18 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
493 return p; 484 return p;
494 } 485 }
495 486
487 /* If no writer did a change during our lookup, we can return early. */
488 if (!create && !invalidated)
489 return NULL;
490
496 /* retry an exact lookup, taking the lock before. 491 /* retry an exact lookup, taking the lock before.
497 * At least, nodes should be hot in our cache. 492 * At least, nodes should be hot in our cache.
498 */ 493 */
499 spin_lock_bh(&base->lock); 494 write_seqlock_bh(&base->lock);
500 p = lookup(daddr, stack, base); 495 p = lookup(daddr, stack, base);
501 if (p != peer_avl_empty) { 496 if (p != peer_avl_empty) {
502 atomic_inc(&p->refcnt); 497 atomic_inc(&p->refcnt);
503 spin_unlock_bh(&base->lock); 498 write_sequnlock_bh(&base->lock);
504 /* Remove the entry from unused list if it was there. */ 499 /* Remove the entry from unused list if it was there. */
505 unlink_from_unused(p); 500 unlink_from_unused(p);
506 return p; 501 return p;
@@ -524,7 +519,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
524 link_to_pool(p, base); 519 link_to_pool(p, base);
525 base->total++; 520 base->total++;
526 } 521 }
527 spin_unlock_bh(&base->lock); 522 write_sequnlock_bh(&base->lock);
528 523
529 if (base->total >= inet_peer_threshold) 524 if (base->total >= inet_peer_threshold)
530 /* Remove one less-recently-used entry. */ 525 /* Remove one less-recently-used entry. */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6613edfac28c..f9af98dd7561 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -778,7 +778,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
778 .proto = IPPROTO_GRE, 778 .proto = IPPROTO_GRE,
779 .fl_gre_key = tunnel->parms.o_key 779 .fl_gre_key = tunnel->parms.o_key
780 }; 780 };
781 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 781 rt = ip_route_output_key(dev_net(dev), &fl);
782 if (IS_ERR(rt)) {
782 dev->stats.tx_carrier_errors++; 783 dev->stats.tx_carrier_errors++;
783 goto tx_error; 784 goto tx_error;
784 } 785 }
@@ -953,9 +954,9 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
953 .proto = IPPROTO_GRE, 954 .proto = IPPROTO_GRE,
954 .fl_gre_key = tunnel->parms.o_key 955 .fl_gre_key = tunnel->parms.o_key
955 }; 956 };
956 struct rtable *rt; 957 struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
957 958
958 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 959 if (!IS_ERR(rt)) {
959 tdev = rt->dst.dev; 960 tdev = rt->dst.dev;
960 ip_rt_put(rt); 961 ip_rt_put(rt);
961 } 962 }
@@ -1215,9 +1216,9 @@ static int ipgre_open(struct net_device *dev)
1215 .proto = IPPROTO_GRE, 1216 .proto = IPPROTO_GRE,
1216 .fl_gre_key = t->parms.o_key 1217 .fl_gre_key = t->parms.o_key
1217 }; 1218 };
1218 struct rtable *rt; 1219 struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
1219 1220
1220 if (ip_route_output_key(dev_net(dev), &rt, &fl)) 1221 if (IS_ERR(rt))
1221 return -EADDRNOTAVAIL; 1222 return -EADDRNOTAVAIL;
1222 dev = rt->dst.dev; 1223 dev = rt->dst.dev;
1223 ip_rt_put(rt); 1224 ip_rt_put(rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04c7b3ba6b39..171f483b21d5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -355,7 +355,8 @@ int ip_queue_xmit(struct sk_buff *skb)
355 * itself out. 355 * itself out.
356 */ 356 */
357 security_sk_classify_flow(sk, &fl); 357 security_sk_classify_flow(sk, &fl);
358 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) 358 rt = ip_route_output_flow(sock_net(sk), &fl, sk);
359 if (IS_ERR(rt))
359 goto no_route; 360 goto no_route;
360 } 361 }
361 sk_setup_caps(sk, &rt->dst); 362 sk_setup_caps(sk, &rt->dst);
@@ -733,6 +734,7 @@ csum_page(struct page *page, int offset, int copy)
733} 734}
734 735
735static inline int ip_ufo_append_data(struct sock *sk, 736static inline int ip_ufo_append_data(struct sock *sk,
737 struct sk_buff_head *queue,
736 int getfrag(void *from, char *to, int offset, int len, 738 int getfrag(void *from, char *to, int offset, int len,
737 int odd, struct sk_buff *skb), 739 int odd, struct sk_buff *skb),
738 void *from, int length, int hh_len, int fragheaderlen, 740 void *from, int length, int hh_len, int fragheaderlen,
@@ -745,7 +747,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
745 * device, so create one single skb packet containing complete 747 * device, so create one single skb packet containing complete
746 * udp datagram 748 * udp datagram
747 */ 749 */
748 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { 750 if ((skb = skb_peek_tail(queue)) == NULL) {
749 skb = sock_alloc_send_skb(sk, 751 skb = sock_alloc_send_skb(sk,
750 hh_len + fragheaderlen + transhdrlen + 20, 752 hh_len + fragheaderlen + transhdrlen + 20,
751 (flags & MSG_DONTWAIT), &err); 753 (flags & MSG_DONTWAIT), &err);
@@ -767,40 +769,28 @@ static inline int ip_ufo_append_data(struct sock *sk,
767 769
768 skb->ip_summed = CHECKSUM_PARTIAL; 770 skb->ip_summed = CHECKSUM_PARTIAL;
769 skb->csum = 0; 771 skb->csum = 0;
770 sk->sk_sndmsg_off = 0;
771 772
772 /* specify the length of each IP datagram fragment */ 773 /* specify the length of each IP datagram fragment */
773 skb_shinfo(skb)->gso_size = mtu - fragheaderlen; 774 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
774 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 775 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
775 __skb_queue_tail(&sk->sk_write_queue, skb); 776 __skb_queue_tail(queue, skb);
776 } 777 }
777 778
778 return skb_append_datato_frags(sk, skb, getfrag, from, 779 return skb_append_datato_frags(sk, skb, getfrag, from,
779 (length - transhdrlen)); 780 (length - transhdrlen));
780} 781}
781 782
782/* 783static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
783 * ip_append_data() and ip_append_page() can make one large IP datagram 784 struct inet_cork *cork,
784 * from many pieces of data. Each pieces will be holded on the socket 785 int getfrag(void *from, char *to, int offset,
785 * until ip_push_pending_frames() is called. Each piece can be a page 786 int len, int odd, struct sk_buff *skb),
786 * or non-page data. 787 void *from, int length, int transhdrlen,
787 * 788 unsigned int flags)
788 * Not only UDP, other transport protocols - e.g. raw sockets - can use
789 * this interface potentially.
790 *
791 * LATER: length must be adjusted by pad at tail, when it is required.
792 */
793int ip_append_data(struct sock *sk,
794 int getfrag(void *from, char *to, int offset, int len,
795 int odd, struct sk_buff *skb),
796 void *from, int length, int transhdrlen,
797 struct ipcm_cookie *ipc, struct rtable **rtp,
798 unsigned int flags)
799{ 789{
800 struct inet_sock *inet = inet_sk(sk); 790 struct inet_sock *inet = inet_sk(sk);
801 struct sk_buff *skb; 791 struct sk_buff *skb;
802 792
803 struct ip_options *opt = NULL; 793 struct ip_options *opt = cork->opt;
804 int hh_len; 794 int hh_len;
805 int exthdrlen; 795 int exthdrlen;
806 int mtu; 796 int mtu;
@@ -809,58 +799,19 @@ int ip_append_data(struct sock *sk,
809 int offset = 0; 799 int offset = 0;
810 unsigned int maxfraglen, fragheaderlen; 800 unsigned int maxfraglen, fragheaderlen;
811 int csummode = CHECKSUM_NONE; 801 int csummode = CHECKSUM_NONE;
812 struct rtable *rt; 802 struct rtable *rt = (struct rtable *)cork->dst;
813 803
814 if (flags&MSG_PROBE) 804 exthdrlen = transhdrlen ? rt->dst.header_len : 0;
815 return 0; 805 length += exthdrlen;
816 806 transhdrlen += exthdrlen;
817 if (skb_queue_empty(&sk->sk_write_queue)) { 807 mtu = cork->fragsize;
818 /*
819 * setup for corking.
820 */
821 opt = ipc->opt;
822 if (opt) {
823 if (inet->cork.opt == NULL) {
824 inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
825 if (unlikely(inet->cork.opt == NULL))
826 return -ENOBUFS;
827 }
828 memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
829 inet->cork.flags |= IPCORK_OPT;
830 inet->cork.addr = ipc->addr;
831 }
832 rt = *rtp;
833 if (unlikely(!rt))
834 return -EFAULT;
835 /*
836 * We steal reference to this route, caller should not release it
837 */
838 *rtp = NULL;
839 inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
840 rt->dst.dev->mtu :
841 dst_mtu(rt->dst.path);
842 inet->cork.dst = &rt->dst;
843 inet->cork.length = 0;
844 sk->sk_sndmsg_page = NULL;
845 sk->sk_sndmsg_off = 0;
846 exthdrlen = rt->dst.header_len;
847 length += exthdrlen;
848 transhdrlen += exthdrlen;
849 } else {
850 rt = (struct rtable *)inet->cork.dst;
851 if (inet->cork.flags & IPCORK_OPT)
852 opt = inet->cork.opt;
853 808
854 transhdrlen = 0;
855 exthdrlen = 0;
856 mtu = inet->cork.fragsize;
857 }
858 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 809 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
859 810
860 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); 811 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
861 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; 812 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
862 813
863 if (inet->cork.length + length > 0xFFFF - fragheaderlen) { 814 if (cork->length + length > 0xFFFF - fragheaderlen) {
864 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, 815 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
865 mtu-exthdrlen); 816 mtu-exthdrlen);
866 return -EMSGSIZE; 817 return -EMSGSIZE;
@@ -876,15 +827,15 @@ int ip_append_data(struct sock *sk,
876 !exthdrlen) 827 !exthdrlen)
877 csummode = CHECKSUM_PARTIAL; 828 csummode = CHECKSUM_PARTIAL;
878 829
879 skb = skb_peek_tail(&sk->sk_write_queue); 830 skb = skb_peek_tail(queue);
880 831
881 inet->cork.length += length; 832 cork->length += length;
882 if (((length > mtu) || (skb && skb_is_gso(skb))) && 833 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
883 (sk->sk_protocol == IPPROTO_UDP) && 834 (sk->sk_protocol == IPPROTO_UDP) &&
884 (rt->dst.dev->features & NETIF_F_UFO)) { 835 (rt->dst.dev->features & NETIF_F_UFO)) {
885 err = ip_ufo_append_data(sk, getfrag, from, length, hh_len, 836 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
886 fragheaderlen, transhdrlen, mtu, 837 hh_len, fragheaderlen, transhdrlen,
887 flags); 838 mtu, flags);
888 if (err) 839 if (err)
889 goto error; 840 goto error;
890 return 0; 841 return 0;
@@ -961,7 +912,7 @@ alloc_new_skb:
961 else 912 else
962 /* only the initial fragment is 913 /* only the initial fragment is
963 time stamped */ 914 time stamped */
964 ipc->tx_flags = 0; 915 cork->tx_flags = 0;
965 } 916 }
966 if (skb == NULL) 917 if (skb == NULL)
967 goto error; 918 goto error;
@@ -972,7 +923,7 @@ alloc_new_skb:
972 skb->ip_summed = csummode; 923 skb->ip_summed = csummode;
973 skb->csum = 0; 924 skb->csum = 0;
974 skb_reserve(skb, hh_len); 925 skb_reserve(skb, hh_len);
975 skb_shinfo(skb)->tx_flags = ipc->tx_flags; 926 skb_shinfo(skb)->tx_flags = cork->tx_flags;
976 927
977 /* 928 /*
978 * Find where to start putting bytes. 929 * Find where to start putting bytes.
@@ -1009,7 +960,7 @@ alloc_new_skb:
1009 /* 960 /*
1010 * Put the packet on the pending queue. 961 * Put the packet on the pending queue.
1011 */ 962 */
1012 __skb_queue_tail(&sk->sk_write_queue, skb); 963 __skb_queue_tail(queue, skb);
1013 continue; 964 continue;
1014 } 965 }
1015 966
@@ -1029,8 +980,8 @@ alloc_new_skb:
1029 } else { 980 } else {
1030 int i = skb_shinfo(skb)->nr_frags; 981 int i = skb_shinfo(skb)->nr_frags;
1031 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; 982 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1032 struct page *page = sk->sk_sndmsg_page; 983 struct page *page = cork->page;
1033 int off = sk->sk_sndmsg_off; 984 int off = cork->off;
1034 unsigned int left; 985 unsigned int left;
1035 986
1036 if (page && (left = PAGE_SIZE - off) > 0) { 987 if (page && (left = PAGE_SIZE - off) > 0) {
@@ -1042,7 +993,7 @@ alloc_new_skb:
1042 goto error; 993 goto error;
1043 } 994 }
1044 get_page(page); 995 get_page(page);
1045 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0); 996 skb_fill_page_desc(skb, i, page, off, 0);
1046 frag = &skb_shinfo(skb)->frags[i]; 997 frag = &skb_shinfo(skb)->frags[i];
1047 } 998 }
1048 } else if (i < MAX_SKB_FRAGS) { 999 } else if (i < MAX_SKB_FRAGS) {
@@ -1053,8 +1004,8 @@ alloc_new_skb:
1053 err = -ENOMEM; 1004 err = -ENOMEM;
1054 goto error; 1005 goto error;
1055 } 1006 }
1056 sk->sk_sndmsg_page = page; 1007 cork->page = page;
1057 sk->sk_sndmsg_off = 0; 1008 cork->off = 0;
1058 1009
1059 skb_fill_page_desc(skb, i, page, 0, 0); 1010 skb_fill_page_desc(skb, i, page, 0, 0);
1060 frag = &skb_shinfo(skb)->frags[i]; 1011 frag = &skb_shinfo(skb)->frags[i];
@@ -1066,7 +1017,7 @@ alloc_new_skb:
1066 err = -EFAULT; 1017 err = -EFAULT;
1067 goto error; 1018 goto error;
1068 } 1019 }
1069 sk->sk_sndmsg_off += copy; 1020 cork->off += copy;
1070 frag->size += copy; 1021 frag->size += copy;
1071 skb->len += copy; 1022 skb->len += copy;
1072 skb->data_len += copy; 1023 skb->data_len += copy;
@@ -1080,11 +1031,87 @@ alloc_new_skb:
1080 return 0; 1031 return 0;
1081 1032
1082error: 1033error:
1083 inet->cork.length -= length; 1034 cork->length -= length;
1084 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); 1035 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1085 return err; 1036 return err;
1086} 1037}
1087 1038
1039static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1040 struct ipcm_cookie *ipc, struct rtable **rtp)
1041{
1042 struct inet_sock *inet = inet_sk(sk);
1043 struct ip_options *opt;
1044 struct rtable *rt;
1045
1046 /*
1047 * setup for corking.
1048 */
1049 opt = ipc->opt;
1050 if (opt) {
1051 if (cork->opt == NULL) {
1052 cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1053 sk->sk_allocation);
1054 if (unlikely(cork->opt == NULL))
1055 return -ENOBUFS;
1056 }
1057 memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen);
1058 cork->flags |= IPCORK_OPT;
1059 cork->addr = ipc->addr;
1060 }
1061 rt = *rtp;
1062 if (unlikely(!rt))
1063 return -EFAULT;
1064 /*
1065 * We steal reference to this route, caller should not release it
1066 */
1067 *rtp = NULL;
1068 cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
1069 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1070 cork->dst = &rt->dst;
1071 cork->length = 0;
1072 cork->tx_flags = ipc->tx_flags;
1073 cork->page = NULL;
1074 cork->off = 0;
1075
1076 return 0;
1077}
1078
1079/*
1080 * ip_append_data() and ip_append_page() can make one large IP datagram
1081 * from many pieces of data. Each pieces will be holded on the socket
1082 * until ip_push_pending_frames() is called. Each piece can be a page
1083 * or non-page data.
1084 *
1085 * Not only UDP, other transport protocols - e.g. raw sockets - can use
1086 * this interface potentially.
1087 *
1088 * LATER: length must be adjusted by pad at tail, when it is required.
1089 */
1090int ip_append_data(struct sock *sk,
1091 int getfrag(void *from, char *to, int offset, int len,
1092 int odd, struct sk_buff *skb),
1093 void *from, int length, int transhdrlen,
1094 struct ipcm_cookie *ipc, struct rtable **rtp,
1095 unsigned int flags)
1096{
1097 struct inet_sock *inet = inet_sk(sk);
1098 int err;
1099
1100 if (flags&MSG_PROBE)
1101 return 0;
1102
1103 if (skb_queue_empty(&sk->sk_write_queue)) {
1104 err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
1105 if (err)
1106 return err;
1107 } else {
1108 transhdrlen = 0;
1109 }
1110
1111 return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
1112 from, length, transhdrlen, flags);
1113}
1114
1088ssize_t ip_append_page(struct sock *sk, struct page *page, 1115ssize_t ip_append_page(struct sock *sk, struct page *page,
1089 int offset, size_t size, int flags) 1116 int offset, size_t size, int flags)
1090{ 1117{
@@ -1228,40 +1255,41 @@ error:
1228 return err; 1255 return err;
1229} 1256}
1230 1257
1231static void ip_cork_release(struct inet_sock *inet) 1258static void ip_cork_release(struct inet_cork *cork)
1232{ 1259{
1233 inet->cork.flags &= ~IPCORK_OPT; 1260 cork->flags &= ~IPCORK_OPT;
1234 kfree(inet->cork.opt); 1261 kfree(cork->opt);
1235 inet->cork.opt = NULL; 1262 cork->opt = NULL;
1236 dst_release(inet->cork.dst); 1263 dst_release(cork->dst);
1237 inet->cork.dst = NULL; 1264 cork->dst = NULL;
1238} 1265}
1239 1266
1240/* 1267/*
1241 * Combined all pending IP fragments on the socket as one IP datagram 1268 * Combined all pending IP fragments on the socket as one IP datagram
1242 * and push them out. 1269 * and push them out.
1243 */ 1270 */
1244int ip_push_pending_frames(struct sock *sk) 1271struct sk_buff *__ip_make_skb(struct sock *sk,
1272 struct sk_buff_head *queue,
1273 struct inet_cork *cork)
1245{ 1274{
1246 struct sk_buff *skb, *tmp_skb; 1275 struct sk_buff *skb, *tmp_skb;
1247 struct sk_buff **tail_skb; 1276 struct sk_buff **tail_skb;
1248 struct inet_sock *inet = inet_sk(sk); 1277 struct inet_sock *inet = inet_sk(sk);
1249 struct net *net = sock_net(sk); 1278 struct net *net = sock_net(sk);
1250 struct ip_options *opt = NULL; 1279 struct ip_options *opt = NULL;
1251 struct rtable *rt = (struct rtable *)inet->cork.dst; 1280 struct rtable *rt = (struct rtable *)cork->dst;
1252 struct iphdr *iph; 1281 struct iphdr *iph;
1253 __be16 df = 0; 1282 __be16 df = 0;
1254 __u8 ttl; 1283 __u8 ttl;
1255 int err = 0;
1256 1284
1257 if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL) 1285 if ((skb = __skb_dequeue(queue)) == NULL)
1258 goto out; 1286 goto out;
1259 tail_skb = &(skb_shinfo(skb)->frag_list); 1287 tail_skb = &(skb_shinfo(skb)->frag_list);
1260 1288
1261 /* move skb->data to ip header from ext header */ 1289 /* move skb->data to ip header from ext header */
1262 if (skb->data < skb_network_header(skb)) 1290 if (skb->data < skb_network_header(skb))
1263 __skb_pull(skb, skb_network_offset(skb)); 1291 __skb_pull(skb, skb_network_offset(skb));
1264 while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 1292 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1265 __skb_pull(tmp_skb, skb_network_header_len(skb)); 1293 __skb_pull(tmp_skb, skb_network_header_len(skb));
1266 *tail_skb = tmp_skb; 1294 *tail_skb = tmp_skb;
1267 tail_skb = &(tmp_skb->next); 1295 tail_skb = &(tmp_skb->next);
@@ -1287,8 +1315,8 @@ int ip_push_pending_frames(struct sock *sk)
1287 ip_dont_fragment(sk, &rt->dst))) 1315 ip_dont_fragment(sk, &rt->dst)))
1288 df = htons(IP_DF); 1316 df = htons(IP_DF);
1289 1317
1290 if (inet->cork.flags & IPCORK_OPT) 1318 if (cork->flags & IPCORK_OPT)
1291 opt = inet->cork.opt; 1319 opt = cork->opt;
1292 1320
1293 if (rt->rt_type == RTN_MULTICAST) 1321 if (rt->rt_type == RTN_MULTICAST)
1294 ttl = inet->mc_ttl; 1322 ttl = inet->mc_ttl;
@@ -1300,7 +1328,7 @@ int ip_push_pending_frames(struct sock *sk)
1300 iph->ihl = 5; 1328 iph->ihl = 5;
1301 if (opt) { 1329 if (opt) {
1302 iph->ihl += opt->optlen>>2; 1330 iph->ihl += opt->optlen>>2;
1303 ip_options_build(skb, opt, inet->cork.addr, rt, 0); 1331 ip_options_build(skb, opt, cork->addr, rt, 0);
1304 } 1332 }
1305 iph->tos = inet->tos; 1333 iph->tos = inet->tos;
1306 iph->frag_off = df; 1334 iph->frag_off = df;
@@ -1316,44 +1344,95 @@ int ip_push_pending_frames(struct sock *sk)
1316 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec 1344 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1317 * on dst refcount 1345 * on dst refcount
1318 */ 1346 */
1319 inet->cork.dst = NULL; 1347 cork->dst = NULL;
1320 skb_dst_set(skb, &rt->dst); 1348 skb_dst_set(skb, &rt->dst);
1321 1349
1322 if (iph->protocol == IPPROTO_ICMP) 1350 if (iph->protocol == IPPROTO_ICMP)
1323 icmp_out_count(net, ((struct icmphdr *) 1351 icmp_out_count(net, ((struct icmphdr *)
1324 skb_transport_header(skb))->type); 1352 skb_transport_header(skb))->type);
1325 1353
1326 /* Netfilter gets whole the not fragmented skb. */ 1354 ip_cork_release(cork);
1355out:
1356 return skb;
1357}
1358
1359int ip_send_skb(struct sk_buff *skb)
1360{
1361 struct net *net = sock_net(skb->sk);
1362 int err;
1363
1327 err = ip_local_out(skb); 1364 err = ip_local_out(skb);
1328 if (err) { 1365 if (err) {
1329 if (err > 0) 1366 if (err > 0)
1330 err = net_xmit_errno(err); 1367 err = net_xmit_errno(err);
1331 if (err) 1368 if (err)
1332 goto error; 1369 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1333 } 1370 }
1334 1371
1335out:
1336 ip_cork_release(inet);
1337 return err; 1372 return err;
1373}
1338 1374
1339error: 1375int ip_push_pending_frames(struct sock *sk)
1340 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); 1376{
1341 goto out; 1377 struct sk_buff *skb;
1378
1379 skb = ip_finish_skb(sk);
1380 if (!skb)
1381 return 0;
1382
1383 /* Netfilter gets whole the not fragmented skb. */
1384 return ip_send_skb(skb);
1342} 1385}
1343 1386
1344/* 1387/*
1345 * Throw away all pending data on the socket. 1388 * Throw away all pending data on the socket.
1346 */ 1389 */
1347void ip_flush_pending_frames(struct sock *sk) 1390static void __ip_flush_pending_frames(struct sock *sk,
1391 struct sk_buff_head *queue,
1392 struct inet_cork *cork)
1348{ 1393{
1349 struct sk_buff *skb; 1394 struct sk_buff *skb;
1350 1395
1351 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 1396 while ((skb = __skb_dequeue_tail(queue)) != NULL)
1352 kfree_skb(skb); 1397 kfree_skb(skb);
1353 1398
1354 ip_cork_release(inet_sk(sk)); 1399 ip_cork_release(cork);
1400}
1401
1402void ip_flush_pending_frames(struct sock *sk)
1403{
1404 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
1355} 1405}
1356 1406
1407struct sk_buff *ip_make_skb(struct sock *sk,
1408 int getfrag(void *from, char *to, int offset,
1409 int len, int odd, struct sk_buff *skb),
1410 void *from, int length, int transhdrlen,
1411 struct ipcm_cookie *ipc, struct rtable **rtp,
1412 unsigned int flags)
1413{
1414 struct inet_cork cork = {};
1415 struct sk_buff_head queue;
1416 int err;
1417
1418 if (flags & MSG_PROBE)
1419 return NULL;
1420
1421 __skb_queue_head_init(&queue);
1422
1423 err = ip_setup_cork(sk, &cork, ipc, rtp);
1424 if (err)
1425 return ERR_PTR(err);
1426
1427 err = __ip_append_data(sk, &queue, &cork, getfrag,
1428 from, length, transhdrlen, flags);
1429 if (err) {
1430 __ip_flush_pending_frames(sk, &queue, &cork);
1431 return ERR_PTR(err);
1432 }
1433
1434 return __ip_make_skb(sk, &queue, &cork);
1435}
1357 1436
1358/* 1437/*
1359 * Fetch data from kernel space and fill in checksum if needed. 1438 * Fetch data from kernel space and fill in checksum if needed.
@@ -1411,7 +1490,8 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1411 .proto = sk->sk_protocol, 1490 .proto = sk->sk_protocol,
1412 .flags = ip_reply_arg_flowi_flags(arg) }; 1491 .flags = ip_reply_arg_flowi_flags(arg) };
1413 security_skb_classify_flow(skb, &fl); 1492 security_skb_classify_flow(skb, &fl);
1414 if (ip_route_output_key(sock_net(sk), &rt, &fl)) 1493 rt = ip_route_output_key(sock_net(sk), &fl);
1494 if (IS_ERR(rt))
1415 return; 1495 return;
1416 } 1496 }
1417 1497
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 988f52fba54a..e1e17576baa6 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -469,7 +469,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
469 .proto = IPPROTO_IPIP 469 .proto = IPPROTO_IPIP
470 }; 470 };
471 471
472 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 472 rt = ip_route_output_key(dev_net(dev), &fl);
473 if (IS_ERR(rt)) {
473 dev->stats.tx_carrier_errors++; 474 dev->stats.tx_carrier_errors++;
474 goto tx_error_icmp; 475 goto tx_error_icmp;
475 } 476 }
@@ -590,9 +591,9 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
590 .fl4_tos = RT_TOS(iph->tos), 591 .fl4_tos = RT_TOS(iph->tos),
591 .proto = IPPROTO_IPIP 592 .proto = IPPROTO_IPIP
592 }; 593 };
593 struct rtable *rt; 594 struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
594 595
595 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 596 if (!IS_ERR(rt)) {
596 tdev = rt->dst.dev; 597 tdev = rt->dst.dev;
597 ip_rt_put(rt); 598 ip_rt_put(rt);
598 } 599 }
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8b65a12654e7..9d5f6340af13 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1618,8 +1618,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1618 .fl4_tos = RT_TOS(iph->tos), 1618 .fl4_tos = RT_TOS(iph->tos),
1619 .proto = IPPROTO_IPIP 1619 .proto = IPPROTO_IPIP
1620 }; 1620 };
1621 1621 rt = ip_route_output_key(net, &fl);
1622 if (ip_route_output_key(net, &rt, &fl)) 1622 if (IS_ERR(rt))
1623 goto out_free; 1623 goto out_free;
1624 encap = sizeof(struct iphdr); 1624 encap = sizeof(struct iphdr);
1625 } else { 1625 } else {
@@ -1629,8 +1629,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1629 .fl4_tos = RT_TOS(iph->tos), 1629 .fl4_tos = RT_TOS(iph->tos),
1630 .proto = IPPROTO_IPIP 1630 .proto = IPPROTO_IPIP
1631 }; 1631 };
1632 1632 rt = ip_route_output_key(net, &fl);
1633 if (ip_route_output_key(net, &rt, &fl)) 1633 if (IS_ERR(rt))
1634 goto out_free; 1634 goto out_free;
1635 } 1635 }
1636 1636
@@ -1813,12 +1813,22 @@ int ip_mr_input(struct sk_buff *skb)
1813 if (IPCB(skb)->flags & IPSKB_FORWARDED) 1813 if (IPCB(skb)->flags & IPSKB_FORWARDED)
1814 goto dont_forward; 1814 goto dont_forward;
1815 1815
1816 err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt); 1816 {
1817 if (err < 0) { 1817 struct rtable *rt = skb_rtable(skb);
1818 kfree_skb(skb); 1818 struct flowi fl = {
1819 return err; 1819 .fl4_dst = rt->rt_key_dst,
1820 .fl4_src = rt->rt_key_src,
1821 .fl4_tos = rt->rt_tos,
1822 .oif = rt->rt_oif,
1823 .iif = rt->rt_iif,
1824 .mark = rt->rt_mark,
1825 };
1826 err = ipmr_fib_lookup(net, &fl, &mrt);
1827 if (err < 0) {
1828 kfree_skb(skb);
1829 return err;
1830 }
1820 } 1831 }
1821
1822 if (!local) { 1832 if (!local) {
1823 if (IPCB(skb)->opt.router_alert) { 1833 if (IPCB(skb)->opt.router_alert) {
1824 if (ip_call_ra_chain(skb)) 1834 if (ip_call_ra_chain(skb))
@@ -1946,9 +1956,19 @@ int pim_rcv_v1(struct sk_buff *skb)
1946 1956
1947 pim = igmp_hdr(skb); 1957 pim = igmp_hdr(skb);
1948 1958
1949 if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) 1959 {
1950 goto drop; 1960 struct rtable *rt = skb_rtable(skb);
1951 1961 struct flowi fl = {
1962 .fl4_dst = rt->rt_key_dst,
1963 .fl4_src = rt->rt_key_src,
1964 .fl4_tos = rt->rt_tos,
1965 .oif = rt->rt_oif,
1966 .iif = rt->rt_iif,
1967 .mark = rt->rt_mark,
1968 };
1969 if (ipmr_fib_lookup(net, &fl, &mrt) < 0)
1970 goto drop;
1971 }
1952 if (!mrt->mroute_do_pim || 1972 if (!mrt->mroute_do_pim ||
1953 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 1973 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1954 goto drop; 1974 goto drop;
@@ -1978,9 +1998,19 @@ static int pim_rcv(struct sk_buff *skb)
1978 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 1998 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1979 goto drop; 1999 goto drop;
1980 2000
1981 if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) 2001 {
1982 goto drop; 2002 struct rtable *rt = skb_rtable(skb);
1983 2003 struct flowi fl = {
2004 .fl4_dst = rt->rt_key_dst,
2005 .fl4_src = rt->rt_key_src,
2006 .fl4_tos = rt->rt_tos,
2007 .oif = rt->rt_oif,
2008 .iif = rt->rt_iif,
2009 .mark = rt->rt_mark,
2010 };
2011 if (ipmr_fib_lookup(net, &fl, &mrt) < 0)
2012 goto drop;
2013 }
1984 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 2014 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
1985drop: 2015drop:
1986 kfree_skb(skb); 2016 kfree_skb(skb);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 994a1f29ebbc..67bf709180de 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -38,7 +38,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
38 fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 38 fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
39 fl.mark = skb->mark; 39 fl.mark = skb->mark;
40 fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 40 fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
41 if (ip_route_output_key(net, &rt, &fl) != 0) 41 rt = ip_route_output_key(net, &fl);
42 if (IS_ERR(rt))
42 return -1; 43 return -1;
43 44
44 /* Drop old route. */ 45 /* Drop old route. */
@@ -48,7 +49,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
48 /* non-local src, find valid iif to satisfy 49 /* non-local src, find valid iif to satisfy
49 * rp-filter when calling ip_route_input. */ 50 * rp-filter when calling ip_route_input. */
50 fl.fl4_dst = iph->saddr; 51 fl.fl4_dst = iph->saddr;
51 if (ip_route_output_key(net, &rt, &fl) != 0) 52 rt = ip_route_output_key(net, &fl);
53 if (IS_ERR(rt))
52 return -1; 54 return -1;
53 55
54 orefdst = skb->_skb_refdst; 56 orefdst = skb->_skb_refdst;
@@ -69,7 +71,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
69 xfrm_decode_session(skb, &fl, AF_INET) == 0) { 71 xfrm_decode_session(skb, &fl, AF_INET) == 0) {
70 struct dst_entry *dst = skb_dst(skb); 72 struct dst_entry *dst = skb_dst(skb);
71 skb_dst_set(skb, NULL); 73 skb_dst_set(skb, NULL);
72 if (xfrm_lookup(net, &dst, &fl, skb->sk, 0)) 74 dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
75 if (IS_ERR(dst))
73 return -1; 76 return -1;
74 skb_dst_set(skb, dst); 77 skb_dst_set(skb, dst);
75 } 78 }
@@ -102,7 +105,8 @@ int ip_xfrm_me_harder(struct sk_buff *skb)
102 dst = ((struct xfrm_dst *)dst)->route; 105 dst = ((struct xfrm_dst *)dst)->route;
103 dst_hold(dst); 106 dst_hold(dst);
104 107
105 if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0) 108 dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
109 if (IS_ERR(dst))
106 return -1; 110 return -1;
107 111
108 skb_dst_drop(skb); 112 skb_dst_drop(skb);
@@ -219,7 +223,11 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
219 223
220static int nf_ip_route(struct dst_entry **dst, struct flowi *fl) 224static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
221{ 225{
222 return ip_route_output_key(&init_net, (struct rtable **)dst, fl); 226 struct rtable *rt = ip_route_output_key(&init_net, fl);
227 if (IS_ERR(rt))
228 return PTR_ERR(rt);
229 *dst = &rt->dst;
230 return 0;
223} 231}
224 232
225static const struct nf_afinfo nf_ip_afinfo = { 233static const struct nf_afinfo nf_ip_afinfo = {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6390ba299b3d..467d570d087a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -555,7 +555,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
555 .fl4_tos = tos, 555 .fl4_tos = tos,
556 .proto = inet->hdrincl ? IPPROTO_RAW : 556 .proto = inet->hdrincl ? IPPROTO_RAW :
557 sk->sk_protocol, 557 sk->sk_protocol,
558 }; 558 .flags = FLOWI_FLAG_CAN_SLEEP,
559 };
559 if (!inet->hdrincl) { 560 if (!inet->hdrincl) {
560 err = raw_probe_proto_opt(&fl, msg); 561 err = raw_probe_proto_opt(&fl, msg);
561 if (err) 562 if (err)
@@ -563,10 +564,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
563 } 564 }
564 565
565 security_sk_classify_flow(sk, &fl); 566 security_sk_classify_flow(sk, &fl);
566 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1); 567 rt = ip_route_output_flow(sock_net(sk), &fl, sk);
568 if (IS_ERR(rt)) {
569 err = PTR_ERR(rt);
570 goto done;
571 }
567 } 572 }
568 if (err)
569 goto done;
570 573
571 err = -EACCES; 574 err = -EACCES;
572 if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST)) 575 if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 52b077d45208..92a24ea34c1b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -424,7 +424,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
424 dst_metric(&r->dst, RTAX_WINDOW), 424 dst_metric(&r->dst, RTAX_WINDOW),
425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + 425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
426 dst_metric(&r->dst, RTAX_RTTVAR)), 426 dst_metric(&r->dst, RTAX_RTTVAR)),
427 r->fl.fl4_tos, 427 r->rt_tos,
428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1, 428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
429 r->dst.hh ? (r->dst.hh->hh_output == 429 r->dst.hh ? (r->dst.hh->hh_output ==
430 dev_queue_xmit) : 0, 430 dev_queue_xmit) : 0,
@@ -711,22 +711,22 @@ static inline bool rt_caching(const struct net *net)
711 net->ipv4.sysctl_rt_cache_rebuild_count; 711 net->ipv4.sysctl_rt_cache_rebuild_count;
712} 712}
713 713
714static inline bool compare_hash_inputs(const struct flowi *fl1, 714static inline bool compare_hash_inputs(const struct rtable *rt1,
715 const struct flowi *fl2) 715 const struct rtable *rt2)
716{ 716{
717 return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) | 717 return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
718 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) | 718 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
719 (fl1->iif ^ fl2->iif)) == 0); 719 (rt1->rt_iif ^ rt2->rt_iif)) == 0);
720} 720}
721 721
722static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 722static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
723{ 723{
724 return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) | 724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
725 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) | 725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
726 (fl1->mark ^ fl2->mark) | 726 (rt1->rt_mark ^ rt2->rt_mark) |
727 (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) | 727 (rt1->rt_tos ^ rt2->rt_tos) |
728 (fl1->oif ^ fl2->oif) | 728 (rt1->rt_oif ^ rt2->rt_oif) |
729 (fl1->iif ^ fl2->iif)) == 0; 729 (rt1->rt_iif ^ rt2->rt_iif)) == 0;
730} 730}
731 731
732static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) 732static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
@@ -813,7 +813,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
813 const struct rtable *aux = head; 813 const struct rtable *aux = head;
814 814
815 while (aux != rth) { 815 while (aux != rth) {
816 if (compare_hash_inputs(&aux->fl, &rth->fl)) 816 if (compare_hash_inputs(aux, rth))
817 return 0; 817 return 0;
818 aux = rcu_dereference_protected(aux->dst.rt_next, 1); 818 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
819 } 819 }
@@ -1014,8 +1014,8 @@ static int slow_chain_length(const struct rtable *head)
1014 return length >> FRACT_BITS; 1014 return length >> FRACT_BITS;
1015} 1015}
1016 1016
1017static int rt_intern_hash(unsigned hash, struct rtable *rt, 1017static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
1018 struct rtable **rp, struct sk_buff *skb, int ifindex) 1018 struct sk_buff *skb, int ifindex)
1019{ 1019{
1020 struct rtable *rth, *cand; 1020 struct rtable *rth, *cand;
1021 struct rtable __rcu **rthp, **candp; 1021 struct rtable __rcu **rthp, **candp;
@@ -1056,7 +1056,7 @@ restart:
1056 printk(KERN_WARNING 1056 printk(KERN_WARNING
1057 "Neighbour table failure & not caching routes.\n"); 1057 "Neighbour table failure & not caching routes.\n");
1058 ip_rt_put(rt); 1058 ip_rt_put(rt);
1059 return err; 1059 return ERR_PTR(err);
1060 } 1060 }
1061 } 1061 }
1062 1062
@@ -1073,7 +1073,7 @@ restart:
1073 rt_free(rth); 1073 rt_free(rth);
1074 continue; 1074 continue;
1075 } 1075 }
1076 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) { 1076 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1077 /* Put it first */ 1077 /* Put it first */
1078 *rthp = rth->dst.rt_next; 1078 *rthp = rth->dst.rt_next;
1079 /* 1079 /*
@@ -1093,11 +1093,9 @@ restart:
1093 spin_unlock_bh(rt_hash_lock_addr(hash)); 1093 spin_unlock_bh(rt_hash_lock_addr(hash));
1094 1094
1095 rt_drop(rt); 1095 rt_drop(rt);
1096 if (rp) 1096 if (skb)
1097 *rp = rth;
1098 else
1099 skb_dst_set(skb, &rth->dst); 1097 skb_dst_set(skb, &rth->dst);
1100 return 0; 1098 return rth;
1101 } 1099 }
1102 1100
1103 if (!atomic_read(&rth->dst.__refcnt)) { 1101 if (!atomic_read(&rth->dst.__refcnt)) {
@@ -1138,7 +1136,7 @@ restart:
1138 rt_emergency_hash_rebuild(net); 1136 rt_emergency_hash_rebuild(net);
1139 spin_unlock_bh(rt_hash_lock_addr(hash)); 1137 spin_unlock_bh(rt_hash_lock_addr(hash));
1140 1138
1141 hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1139 hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1142 ifindex, rt_genid(net)); 1140 ifindex, rt_genid(net));
1143 goto restart; 1141 goto restart;
1144 } 1142 }
@@ -1154,7 +1152,7 @@ restart:
1154 1152
1155 if (err != -ENOBUFS) { 1153 if (err != -ENOBUFS) {
1156 rt_drop(rt); 1154 rt_drop(rt);
1157 return err; 1155 return ERR_PTR(err);
1158 } 1156 }
1159 1157
1160 /* Neighbour tables are full and nothing 1158 /* Neighbour tables are full and nothing
@@ -1175,7 +1173,7 @@ restart:
1175 if (net_ratelimit()) 1173 if (net_ratelimit())
1176 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n"); 1174 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1177 rt_drop(rt); 1175 rt_drop(rt);
1178 return -ENOBUFS; 1176 return ERR_PTR(-ENOBUFS);
1179 } 1177 }
1180 } 1178 }
1181 1179
@@ -1201,11 +1199,9 @@ restart:
1201 spin_unlock_bh(rt_hash_lock_addr(hash)); 1199 spin_unlock_bh(rt_hash_lock_addr(hash));
1202 1200
1203skip_hashing: 1201skip_hashing:
1204 if (rp) 1202 if (skb)
1205 *rp = rt;
1206 else
1207 skb_dst_set(skb, &rt->dst); 1203 skb_dst_set(skb, &rt->dst);
1208 return 0; 1204 return rt;
1209} 1205}
1210 1206
1211static atomic_t __rt_peer_genid = ATOMIC_INIT(0); 1207static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
@@ -1348,12 +1344,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1348 ip_rt_put(rt); 1344 ip_rt_put(rt);
1349 ret = NULL; 1345 ret = NULL;
1350 } else if (rt->rt_flags & RTCF_REDIRECTED) { 1346 } else if (rt->rt_flags & RTCF_REDIRECTED) {
1351 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1347 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1352 rt->fl.oif, 1348 rt->rt_oif,
1353 rt_genid(dev_net(dst->dev))); 1349 rt_genid(dev_net(dst->dev)));
1354#if RT_CACHE_DEBUG >= 1 1350#if RT_CACHE_DEBUG >= 1
1355 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n", 1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1356 &rt->rt_dst, rt->fl.fl4_tos); 1352 &rt->rt_dst, rt->rt_tos);
1357#endif 1353#endif
1358 rt_del(hash, rt); 1354 rt_del(hash, rt);
1359 ret = NULL; 1355 ret = NULL;
@@ -1701,8 +1697,17 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
1701 if (rt_is_output_route(rt)) 1697 if (rt_is_output_route(rt))
1702 src = rt->rt_src; 1698 src = rt->rt_src;
1703 else { 1699 else {
1700 struct flowi fl = {
1701 .fl4_dst = rt->rt_key_dst,
1702 .fl4_src = rt->rt_key_src,
1703 .fl4_tos = rt->rt_tos,
1704 .oif = rt->rt_oif,
1705 .iif = rt->rt_iif,
1706 .mark = rt->rt_mark,
1707 };
1708
1704 rcu_read_lock(); 1709 rcu_read_lock();
1705 if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) 1710 if (fib_lookup(dev_net(rt->dst.dev), &fl, &res) == 0)
1706 src = FIB_RES_PREFSRC(res); 1711 src = FIB_RES_PREFSRC(res);
1707 else 1712 else
1708 src = inet_select_addr(rt->dst.dev, rt->rt_gateway, 1713 src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
@@ -1752,7 +1757,8 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1752 return mtu; 1757 return mtu;
1753} 1758}
1754 1759
1755static void rt_init_metrics(struct rtable *rt, struct fib_info *fi) 1760static void rt_init_metrics(struct rtable *rt, const struct flowi *oldflp,
1761 struct fib_info *fi)
1756{ 1762{
1757 struct inet_peer *peer; 1763 struct inet_peer *peer;
1758 int create = 0; 1764 int create = 0;
@@ -1760,12 +1766,12 @@ static void rt_init_metrics(struct rtable *rt, struct fib_info *fi)
1760 /* If a peer entry exists for this destination, we must hook 1766 /* If a peer entry exists for this destination, we must hook
1761 * it up in order to get at cached metrics. 1767 * it up in order to get at cached metrics.
1762 */ 1768 */
1763 if (rt->fl.flags & FLOWI_FLAG_PRECOW_METRICS) 1769 if (oldflp && (oldflp->flags & FLOWI_FLAG_PRECOW_METRICS))
1764 create = 1; 1770 create = 1;
1765 1771
1766 rt_bind_peer(rt, create); 1772 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
1767 peer = rt->peer;
1768 if (peer) { 1773 if (peer) {
1774 rt->rt_peer_genid = rt_peer_genid();
1769 if (inet_metrics_new(peer)) 1775 if (inet_metrics_new(peer))
1770 memcpy(peer->metrics, fi->fib_metrics, 1776 memcpy(peer->metrics, fi->fib_metrics,
1771 sizeof(u32) * RTAX_MAX); 1777 sizeof(u32) * RTAX_MAX);
@@ -1787,7 +1793,8 @@ static void rt_init_metrics(struct rtable *rt, struct fib_info *fi)
1787 } 1793 }
1788} 1794}
1789 1795
1790static void rt_set_nexthop(struct rtable *rt, const struct fib_result *res, 1796static void rt_set_nexthop(struct rtable *rt, const struct flowi *oldflp,
1797 const struct fib_result *res,
1791 struct fib_info *fi, u16 type, u32 itag) 1798 struct fib_info *fi, u16 type, u32 itag)
1792{ 1799{
1793 struct dst_entry *dst = &rt->dst; 1800 struct dst_entry *dst = &rt->dst;
@@ -1796,7 +1803,7 @@ static void rt_set_nexthop(struct rtable *rt, const struct fib_result *res,
1796 if (FIB_RES_GW(*res) && 1803 if (FIB_RES_GW(*res) &&
1797 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 1804 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1798 rt->rt_gateway = FIB_RES_GW(*res); 1805 rt->rt_gateway = FIB_RES_GW(*res);
1799 rt_init_metrics(rt, fi); 1806 rt_init_metrics(rt, oldflp, fi);
1800#ifdef CONFIG_IP_ROUTE_CLASSID 1807#ifdef CONFIG_IP_ROUTE_CLASSID
1801 dst->tclassid = FIB_RES_NH(*res).nh_tclassid; 1808 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1802#endif 1809#endif
@@ -1865,20 +1872,19 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1865 1872
1866 rth->dst.output = ip_rt_bug; 1873 rth->dst.output = ip_rt_bug;
1867 1874
1868 rth->fl.fl4_dst = daddr; 1875 rth->rt_key_dst = daddr;
1869 rth->rt_dst = daddr; 1876 rth->rt_dst = daddr;
1870 rth->fl.fl4_tos = tos; 1877 rth->rt_tos = tos;
1871 rth->fl.mark = skb->mark; 1878 rth->rt_mark = skb->mark;
1872 rth->fl.fl4_src = saddr; 1879 rth->rt_key_src = saddr;
1873 rth->rt_src = saddr; 1880 rth->rt_src = saddr;
1874#ifdef CONFIG_IP_ROUTE_CLASSID 1881#ifdef CONFIG_IP_ROUTE_CLASSID
1875 rth->dst.tclassid = itag; 1882 rth->dst.tclassid = itag;
1876#endif 1883#endif
1877 rth->rt_iif = 1884 rth->rt_iif = dev->ifindex;
1878 rth->fl.iif = dev->ifindex;
1879 rth->dst.dev = init_net.loopback_dev; 1885 rth->dst.dev = init_net.loopback_dev;
1880 dev_hold(rth->dst.dev); 1886 dev_hold(rth->dst.dev);
1881 rth->fl.oif = 0; 1887 rth->rt_oif = 0;
1882 rth->rt_gateway = daddr; 1888 rth->rt_gateway = daddr;
1883 rth->rt_spec_dst= spec_dst; 1889 rth->rt_spec_dst= spec_dst;
1884 rth->rt_genid = rt_genid(dev_net(dev)); 1890 rth->rt_genid = rt_genid(dev_net(dev));
@@ -1896,7 +1902,10 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1896 RT_CACHE_STAT_INC(in_slow_mc); 1902 RT_CACHE_STAT_INC(in_slow_mc);
1897 1903
1898 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1904 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1899 return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); 1905 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1906 err = 0;
1907 if (IS_ERR(rth))
1908 err = PTR_ERR(rth);
1900 1909
1901e_nobufs: 1910e_nobufs:
1902 return -ENOBUFS; 1911 return -ENOBUFS;
@@ -2000,25 +2009,24 @@ static int __mkroute_input(struct sk_buff *skb,
2000 goto cleanup; 2009 goto cleanup;
2001 } 2010 }
2002 2011
2003 rth->fl.fl4_dst = daddr; 2012 rth->rt_key_dst = daddr;
2004 rth->rt_dst = daddr; 2013 rth->rt_dst = daddr;
2005 rth->fl.fl4_tos = tos; 2014 rth->rt_tos = tos;
2006 rth->fl.mark = skb->mark; 2015 rth->rt_mark = skb->mark;
2007 rth->fl.fl4_src = saddr; 2016 rth->rt_key_src = saddr;
2008 rth->rt_src = saddr; 2017 rth->rt_src = saddr;
2009 rth->rt_gateway = daddr; 2018 rth->rt_gateway = daddr;
2010 rth->rt_iif = 2019 rth->rt_iif = in_dev->dev->ifindex;
2011 rth->fl.iif = in_dev->dev->ifindex;
2012 rth->dst.dev = (out_dev)->dev; 2020 rth->dst.dev = (out_dev)->dev;
2013 dev_hold(rth->dst.dev); 2021 dev_hold(rth->dst.dev);
2014 rth->fl.oif = 0; 2022 rth->rt_oif = 0;
2015 rth->rt_spec_dst= spec_dst; 2023 rth->rt_spec_dst= spec_dst;
2016 2024
2017 rth->dst.input = ip_forward; 2025 rth->dst.input = ip_forward;
2018 rth->dst.output = ip_output; 2026 rth->dst.output = ip_output;
2019 rth->rt_genid = rt_genid(dev_net(rth->dst.dev)); 2027 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2020 2028
2021 rt_set_nexthop(rth, res, res->fi, res->type, itag); 2029 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2022 2030
2023 rth->rt_flags = flags; 2031 rth->rt_flags = flags;
2024 2032
@@ -2051,7 +2059,10 @@ static int ip_mkroute_input(struct sk_buff *skb,
2051 /* put it into the cache */ 2059 /* put it into the cache */
2052 hash = rt_hash(daddr, saddr, fl->iif, 2060 hash = rt_hash(daddr, saddr, fl->iif,
2053 rt_genid(dev_net(rth->dst.dev))); 2061 rt_genid(dev_net(rth->dst.dev)));
2054 return rt_intern_hash(hash, rth, NULL, skb, fl->iif); 2062 rth = rt_intern_hash(hash, rth, skb, fl->iif);
2063 if (IS_ERR(rth))
2064 return PTR_ERR(rth);
2065 return 0;
2055} 2066}
2056 2067
2057/* 2068/*
@@ -2170,17 +2181,16 @@ local_input:
2170 rth->dst.output= ip_rt_bug; 2181 rth->dst.output= ip_rt_bug;
2171 rth->rt_genid = rt_genid(net); 2182 rth->rt_genid = rt_genid(net);
2172 2183
2173 rth->fl.fl4_dst = daddr; 2184 rth->rt_key_dst = daddr;
2174 rth->rt_dst = daddr; 2185 rth->rt_dst = daddr;
2175 rth->fl.fl4_tos = tos; 2186 rth->rt_tos = tos;
2176 rth->fl.mark = skb->mark; 2187 rth->rt_mark = skb->mark;
2177 rth->fl.fl4_src = saddr; 2188 rth->rt_key_src = saddr;
2178 rth->rt_src = saddr; 2189 rth->rt_src = saddr;
2179#ifdef CONFIG_IP_ROUTE_CLASSID 2190#ifdef CONFIG_IP_ROUTE_CLASSID
2180 rth->dst.tclassid = itag; 2191 rth->dst.tclassid = itag;
2181#endif 2192#endif
2182 rth->rt_iif = 2193 rth->rt_iif = dev->ifindex;
2183 rth->fl.iif = dev->ifindex;
2184 rth->dst.dev = net->loopback_dev; 2194 rth->dst.dev = net->loopback_dev;
2185 dev_hold(rth->dst.dev); 2195 dev_hold(rth->dst.dev);
2186 rth->rt_gateway = daddr; 2196 rth->rt_gateway = daddr;
@@ -2194,7 +2204,10 @@ local_input:
2194 } 2204 }
2195 rth->rt_type = res.type; 2205 rth->rt_type = res.type;
2196 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); 2206 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2197 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif); 2207 rth = rt_intern_hash(hash, rth, skb, fl.iif);
2208 err = 0;
2209 if (IS_ERR(rth))
2210 err = PTR_ERR(rth);
2198 goto out; 2211 goto out;
2199 2212
2200no_route: 2213no_route:
@@ -2256,12 +2269,12 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2256 2269
2257 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2270 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2258 rth = rcu_dereference(rth->dst.rt_next)) { 2271 rth = rcu_dereference(rth->dst.rt_next)) {
2259 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | 2272 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2260 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | 2273 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2261 (rth->fl.iif ^ iif) | 2274 (rth->rt_iif ^ iif) |
2262 rth->fl.oif | 2275 rth->rt_oif |
2263 (rth->fl.fl4_tos ^ tos)) == 0 && 2276 (rth->rt_tos ^ tos)) == 0 &&
2264 rth->fl.mark == skb->mark && 2277 rth->rt_mark == skb->mark &&
2265 net_eq(dev_net(rth->dst.dev), net) && 2278 net_eq(dev_net(rth->dst.dev), net) &&
2266 !rt_is_expired(rth)) { 2279 !rt_is_expired(rth)) {
2267 if (noref) { 2280 if (noref) {
@@ -2369,14 +2382,14 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2369 if (!rth) 2382 if (!rth)
2370 return ERR_PTR(-ENOBUFS); 2383 return ERR_PTR(-ENOBUFS);
2371 2384
2372 rth->fl.fl4_dst = oldflp->fl4_dst; 2385 rth->rt_key_dst = oldflp->fl4_dst;
2373 rth->fl.fl4_tos = tos; 2386 rth->rt_tos = tos;
2374 rth->fl.fl4_src = oldflp->fl4_src; 2387 rth->rt_key_src = oldflp->fl4_src;
2375 rth->fl.oif = oldflp->oif; 2388 rth->rt_oif = oldflp->oif;
2376 rth->fl.mark = oldflp->mark; 2389 rth->rt_mark = oldflp->mark;
2377 rth->rt_dst = fl->fl4_dst; 2390 rth->rt_dst = fl->fl4_dst;
2378 rth->rt_src = fl->fl4_src; 2391 rth->rt_src = fl->fl4_src;
2379 rth->rt_iif = oldflp->oif ? : dev_out->ifindex; 2392 rth->rt_iif = 0;
2380 /* get references to the devices that are to be hold by the routing 2393 /* get references to the devices that are to be hold by the routing
2381 cache entry */ 2394 cache entry */
2382 rth->dst.dev = dev_out; 2395 rth->dst.dev = dev_out;
@@ -2411,7 +2424,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2411#endif 2424#endif
2412 } 2425 }
2413 2426
2414 rt_set_nexthop(rth, res, fi, type, 0); 2427 rt_set_nexthop(rth, oldflp, res, fi, type, 0);
2415 2428
2416 rth->rt_flags = flags; 2429 rth->rt_flags = flags;
2417 return rth; 2430 return rth;
@@ -2422,33 +2435,33 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2422 * called with rcu_read_lock(); 2435 * called with rcu_read_lock();
2423 */ 2436 */
2424 2437
2425static int ip_route_output_slow(struct net *net, struct rtable **rp, 2438static struct rtable *ip_route_output_slow(struct net *net,
2426 const struct flowi *oldflp) 2439 const struct flowi *oldflp)
2427{ 2440{
2428 u32 tos = RT_FL_TOS(oldflp); 2441 u32 tos = RT_FL_TOS(oldflp);
2429 struct flowi fl = { .fl4_dst = oldflp->fl4_dst, 2442 struct flowi fl;
2430 .fl4_src = oldflp->fl4_src,
2431 .fl4_tos = tos & IPTOS_RT_MASK,
2432 .fl4_scope = ((tos & RTO_ONLINK) ?
2433 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
2434 .mark = oldflp->mark,
2435 .iif = net->loopback_dev->ifindex,
2436 .oif = oldflp->oif };
2437 struct fib_result res; 2443 struct fib_result res;
2438 unsigned int flags = 0; 2444 unsigned int flags = 0;
2439 struct net_device *dev_out = NULL; 2445 struct net_device *dev_out = NULL;
2440 struct rtable *rth; 2446 struct rtable *rth;
2441 int err;
2442
2443 2447
2444 res.fi = NULL; 2448 res.fi = NULL;
2445#ifdef CONFIG_IP_MULTIPLE_TABLES 2449#ifdef CONFIG_IP_MULTIPLE_TABLES
2446 res.r = NULL; 2450 res.r = NULL;
2447#endif 2451#endif
2448 2452
2453 fl.oif = oldflp->oif;
2454 fl.iif = net->loopback_dev->ifindex;
2455 fl.mark = oldflp->mark;
2456 fl.fl4_dst = oldflp->fl4_dst;
2457 fl.fl4_src = oldflp->fl4_src;
2458 fl.fl4_tos = tos & IPTOS_RT_MASK;
2459 fl.fl4_scope = ((tos & RTO_ONLINK) ?
2460 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2461
2449 rcu_read_lock(); 2462 rcu_read_lock();
2450 if (oldflp->fl4_src) { 2463 if (oldflp->fl4_src) {
2451 err = -EINVAL; 2464 rth = ERR_PTR(-EINVAL);
2452 if (ipv4_is_multicast(oldflp->fl4_src) || 2465 if (ipv4_is_multicast(oldflp->fl4_src) ||
2453 ipv4_is_lbcast(oldflp->fl4_src) || 2466 ipv4_is_lbcast(oldflp->fl4_src) ||
2454 ipv4_is_zeronet(oldflp->fl4_src)) 2467 ipv4_is_zeronet(oldflp->fl4_src))
@@ -2499,13 +2512,13 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
2499 2512
2500 if (oldflp->oif) { 2513 if (oldflp->oif) {
2501 dev_out = dev_get_by_index_rcu(net, oldflp->oif); 2514 dev_out = dev_get_by_index_rcu(net, oldflp->oif);
2502 err = -ENODEV; 2515 rth = ERR_PTR(-ENODEV);
2503 if (dev_out == NULL) 2516 if (dev_out == NULL)
2504 goto out; 2517 goto out;
2505 2518
2506 /* RACE: Check return value of inet_select_addr instead. */ 2519 /* RACE: Check return value of inet_select_addr instead. */
2507 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) { 2520 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2508 err = -ENETUNREACH; 2521 rth = ERR_PTR(-ENETUNREACH);
2509 goto out; 2522 goto out;
2510 } 2523 }
2511 if (ipv4_is_local_multicast(oldflp->fl4_dst) || 2524 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
@@ -2563,7 +2576,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
2563 res.type = RTN_UNICAST; 2576 res.type = RTN_UNICAST;
2564 goto make_route; 2577 goto make_route;
2565 } 2578 }
2566 err = -ENETUNREACH; 2579 rth = ERR_PTR(-ENETUNREACH);
2567 goto out; 2580 goto out;
2568 } 2581 }
2569 2582
@@ -2598,23 +2611,20 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
2598 2611
2599make_route: 2612make_route:
2600 rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags); 2613 rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags);
2601 if (IS_ERR(rth)) 2614 if (!IS_ERR(rth)) {
2602 err = PTR_ERR(rth);
2603 else {
2604 unsigned int hash; 2615 unsigned int hash;
2605 2616
2606 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, 2617 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2607 rt_genid(dev_net(dev_out))); 2618 rt_genid(dev_net(dev_out)));
2608 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif); 2619 rth = rt_intern_hash(hash, rth, NULL, oldflp->oif);
2609 } 2620 }
2610 2621
2611out: 2622out:
2612 rcu_read_unlock(); 2623 rcu_read_unlock();
2613 return err; 2624 return rth;
2614} 2625}
2615 2626
2616int __ip_route_output_key(struct net *net, struct rtable **rp, 2627struct rtable *__ip_route_output_key(struct net *net, const struct flowi *flp)
2617 const struct flowi *flp)
2618{ 2628{
2619 struct rtable *rth; 2629 struct rtable *rth;
2620 unsigned int hash; 2630 unsigned int hash;
@@ -2627,27 +2637,26 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2627 rcu_read_lock_bh(); 2637 rcu_read_lock_bh();
2628 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth; 2638 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2629 rth = rcu_dereference_bh(rth->dst.rt_next)) { 2639 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2630 if (rth->fl.fl4_dst == flp->fl4_dst && 2640 if (rth->rt_key_dst == flp->fl4_dst &&
2631 rth->fl.fl4_src == flp->fl4_src && 2641 rth->rt_key_src == flp->fl4_src &&
2632 rt_is_output_route(rth) && 2642 rt_is_output_route(rth) &&
2633 rth->fl.oif == flp->oif && 2643 rth->rt_oif == flp->oif &&
2634 rth->fl.mark == flp->mark && 2644 rth->rt_mark == flp->mark &&
2635 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2645 !((rth->rt_tos ^ flp->fl4_tos) &
2636 (IPTOS_RT_MASK | RTO_ONLINK)) && 2646 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2637 net_eq(dev_net(rth->dst.dev), net) && 2647 net_eq(dev_net(rth->dst.dev), net) &&
2638 !rt_is_expired(rth)) { 2648 !rt_is_expired(rth)) {
2639 dst_use(&rth->dst, jiffies); 2649 dst_use(&rth->dst, jiffies);
2640 RT_CACHE_STAT_INC(out_hit); 2650 RT_CACHE_STAT_INC(out_hit);
2641 rcu_read_unlock_bh(); 2651 rcu_read_unlock_bh();
2642 *rp = rth; 2652 return rth;
2643 return 0;
2644 } 2653 }
2645 RT_CACHE_STAT_INC(out_hlist_search); 2654 RT_CACHE_STAT_INC(out_hlist_search);
2646 } 2655 }
2647 rcu_read_unlock_bh(); 2656 rcu_read_unlock_bh();
2648 2657
2649slow_output: 2658slow_output:
2650 return ip_route_output_slow(net, rp, flp); 2659 return ip_route_output_slow(net, flp);
2651} 2660}
2652EXPORT_SYMBOL_GPL(__ip_route_output_key); 2661EXPORT_SYMBOL_GPL(__ip_route_output_key);
2653 2662
@@ -2675,12 +2684,10 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2675 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2684 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2676}; 2685};
2677 2686
2678 2687struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2679static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
2680{ 2688{
2681 struct rtable *ort = *rp; 2689 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
2682 struct rtable *rt = (struct rtable *) 2690 struct rtable *ort = (struct rtable *) dst_orig;
2683 dst_alloc(&ipv4_dst_blackhole_ops, 1);
2684 2691
2685 if (rt) { 2692 if (rt) {
2686 struct dst_entry *new = &rt->dst; 2693 struct dst_entry *new = &rt->dst;
@@ -2694,7 +2701,12 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2694 if (new->dev) 2701 if (new->dev)
2695 dev_hold(new->dev); 2702 dev_hold(new->dev);
2696 2703
2697 rt->fl = ort->fl; 2704 rt->rt_key_dst = ort->rt_key_dst;
2705 rt->rt_key_src = ort->rt_key_src;
2706 rt->rt_tos = ort->rt_tos;
2707 rt->rt_iif = ort->rt_iif;
2708 rt->rt_oif = ort->rt_oif;
2709 rt->rt_mark = ort->rt_mark;
2698 2710
2699 rt->rt_genid = rt_genid(net); 2711 rt->rt_genid = rt_genid(net);
2700 rt->rt_flags = ort->rt_flags; 2712 rt->rt_flags = ort->rt_flags;
@@ -2714,42 +2726,31 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2714 dst_free(new); 2726 dst_free(new);
2715 } 2727 }
2716 2728
2717 dst_release(&(*rp)->dst); 2729 dst_release(dst_orig);
2718 *rp = rt; 2730
2719 return rt ? 0 : -ENOMEM; 2731 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2720} 2732}
2721 2733
2722int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp, 2734struct rtable *ip_route_output_flow(struct net *net, struct flowi *flp,
2723 struct sock *sk, int flags) 2735 struct sock *sk)
2724{ 2736{
2725 int err; 2737 struct rtable *rt = __ip_route_output_key(net, flp);
2726 2738
2727 if ((err = __ip_route_output_key(net, rp, flp)) != 0) 2739 if (IS_ERR(rt))
2728 return err; 2740 return rt;
2729 2741
2730 if (flp->proto) { 2742 if (flp->proto) {
2731 if (!flp->fl4_src) 2743 if (!flp->fl4_src)
2732 flp->fl4_src = (*rp)->rt_src; 2744 flp->fl4_src = rt->rt_src;
2733 if (!flp->fl4_dst) 2745 if (!flp->fl4_dst)
2734 flp->fl4_dst = (*rp)->rt_dst; 2746 flp->fl4_dst = rt->rt_dst;
2735 err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk, 2747 rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flp, sk, 0);
2736 flags ? XFRM_LOOKUP_WAIT : 0);
2737 if (err == -EREMOTE)
2738 err = ipv4_dst_blackhole(net, rp, flp);
2739
2740 return err;
2741 } 2748 }
2742 2749
2743 return 0; 2750 return rt;
2744} 2751}
2745EXPORT_SYMBOL_GPL(ip_route_output_flow); 2752EXPORT_SYMBOL_GPL(ip_route_output_flow);
2746 2753
2747int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2748{
2749 return ip_route_output_flow(net, rp, flp, NULL, 0);
2750}
2751EXPORT_SYMBOL(ip_route_output_key);
2752
2753static int rt_fill_info(struct net *net, 2754static int rt_fill_info(struct net *net,
2754 struct sk_buff *skb, u32 pid, u32 seq, int event, 2755 struct sk_buff *skb, u32 pid, u32 seq, int event,
2755 int nowait, unsigned int flags) 2756 int nowait, unsigned int flags)
@@ -2768,7 +2769,7 @@ static int rt_fill_info(struct net *net,
2768 r->rtm_family = AF_INET; 2769 r->rtm_family = AF_INET;
2769 r->rtm_dst_len = 32; 2770 r->rtm_dst_len = 32;
2770 r->rtm_src_len = 0; 2771 r->rtm_src_len = 0;
2771 r->rtm_tos = rt->fl.fl4_tos; 2772 r->rtm_tos = rt->rt_tos;
2772 r->rtm_table = RT_TABLE_MAIN; 2773 r->rtm_table = RT_TABLE_MAIN;
2773 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN); 2774 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2774 r->rtm_type = rt->rt_type; 2775 r->rtm_type = rt->rt_type;
@@ -2780,9 +2781,9 @@ static int rt_fill_info(struct net *net,
2780 2781
2781 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst); 2782 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2782 2783
2783 if (rt->fl.fl4_src) { 2784 if (rt->rt_key_src) {
2784 r->rtm_src_len = 32; 2785 r->rtm_src_len = 32;
2785 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src); 2786 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
2786 } 2787 }
2787 if (rt->dst.dev) 2788 if (rt->dst.dev)
2788 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); 2789 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
@@ -2792,7 +2793,7 @@ static int rt_fill_info(struct net *net,
2792#endif 2793#endif
2793 if (rt_is_input_route(rt)) 2794 if (rt_is_input_route(rt))
2794 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2795 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2795 else if (rt->rt_src != rt->fl.fl4_src) 2796 else if (rt->rt_src != rt->rt_key_src)
2796 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src); 2797 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2797 2798
2798 if (rt->rt_dst != rt->rt_gateway) 2799 if (rt->rt_dst != rt->rt_gateway)
@@ -2801,8 +2802,8 @@ static int rt_fill_info(struct net *net,
2801 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2802 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2802 goto nla_put_failure; 2803 goto nla_put_failure;
2803 2804
2804 if (rt->fl.mark) 2805 if (rt->rt_mark)
2805 NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark); 2806 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
2806 2807
2807 error = rt->dst.error; 2808 error = rt->dst.error;
2808 expires = (rt->peer && rt->peer->pmtu_expires) ? 2809 expires = (rt->peer && rt->peer->pmtu_expires) ?
@@ -2836,7 +2837,7 @@ static int rt_fill_info(struct net *net,
2836 } 2837 }
2837 } else 2838 } else
2838#endif 2839#endif
2839 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); 2840 NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
2840 } 2841 }
2841 2842
2842 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, 2843 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -2917,7 +2918,11 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2917 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, 2918 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2918 .mark = mark, 2919 .mark = mark,
2919 }; 2920 };
2920 err = ip_route_output_key(net, &rt, &fl); 2921 rt = ip_route_output_key(net, &fl);
2922
2923 err = 0;
2924 if (IS_ERR(rt))
2925 err = PTR_ERR(rt);
2921 } 2926 }
2922 2927
2923 if (err) 2928 if (err)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 47519205a014..0ad6ddf638a7 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -355,7 +355,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
355 .fl_ip_sport = th->dest, 355 .fl_ip_sport = th->dest,
356 .fl_ip_dport = th->source }; 356 .fl_ip_dport = th->source };
357 security_req_classify_flow(req, &fl); 357 security_req_classify_flow(req, &fl);
358 if (ip_route_output_key(sock_net(sk), &rt, &fl)) { 358 rt = ip_route_output_key(sock_net(sk), &fl);
359 if (IS_ERR(rt)) {
359 reqsk_free(req); 360 reqsk_free(req);
360 goto out; 361 goto out;
361 } 362 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2f692cefd3b0..08ea735b9d72 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
1222 } 1222 }
1223 1223
1224 /* D-SACK for already forgotten data... Do dumb counting. */ 1224 /* D-SACK for already forgotten data... Do dumb counting. */
1225 if (dup_sack && 1225 if (dup_sack && tp->undo_marker && tp->undo_retrans &&
1226 !after(end_seq_0, prior_snd_una) && 1226 !after(end_seq_0, prior_snd_una) &&
1227 after(end_seq_0, tp->undo_marker)) 1227 after(end_seq_0, tp->undo_marker))
1228 tp->undo_retrans--; 1228 tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1299 1299
1300 /* Account D-SACK for retransmitted packet. */ 1300 /* Account D-SACK for retransmitted packet. */
1301 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1301 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1302 if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 1302 if (tp->undo_marker && tp->undo_retrans &&
1303 after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
1303 tp->undo_retrans--; 1304 tp->undo_retrans--;
1304 if (sacked & TCPCB_SACKED_ACKED) 1305 if (sacked & TCPCB_SACKED_ACKED)
1305 state->reord = min(fack_count, state->reord); 1306 state->reord = min(fack_count, state->reord);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 27a0cc8cc888..f7e6c2c2d2bb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -152,7 +152,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
152 __be16 orig_sport, orig_dport; 152 __be16 orig_sport, orig_dport;
153 struct rtable *rt; 153 struct rtable *rt;
154 __be32 daddr, nexthop; 154 __be32 daddr, nexthop;
155 int tmp;
156 int err; 155 int err;
157 156
158 if (addr_len < sizeof(struct sockaddr_in)) 157 if (addr_len < sizeof(struct sockaddr_in))
@@ -170,14 +169,15 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
170 169
171 orig_sport = inet->inet_sport; 170 orig_sport = inet->inet_sport;
172 orig_dport = usin->sin_port; 171 orig_dport = usin->sin_port;
173 tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr, 172 rt = ip_route_connect(nexthop, inet->inet_saddr,
174 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175 IPPROTO_TCP, 174 IPPROTO_TCP,
176 orig_sport, orig_dport, sk, 1); 175 orig_sport, orig_dport, sk, true);
177 if (tmp < 0) { 176 if (IS_ERR(rt)) {
178 if (tmp == -ENETUNREACH) 177 err = PTR_ERR(rt);
178 if (err == -ENETUNREACH)
179 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 179 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180 return tmp; 180 return err;
181 } 181 }
182 182
183 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 183 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
@@ -236,12 +236,14 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
236 if (err) 236 if (err)
237 goto failure; 237 goto failure;
238 238
239 err = ip_route_newports(&rt, IPPROTO_TCP, 239 rt = ip_route_newports(rt, IPPROTO_TCP,
240 orig_sport, orig_dport, 240 orig_sport, orig_dport,
241 inet->inet_sport, inet->inet_dport, sk); 241 inet->inet_sport, inet->inet_dport, sk);
242 if (err) 242 if (IS_ERR(rt)) {
243 err = PTR_ERR(rt);
244 rt = NULL;
243 goto failure; 245 goto failure;
244 246 }
245 /* OK, now commit destination to socket. */ 247 /* OK, now commit destination to socket. */
246 sk->sk_gso_type = SKB_GSO_TCPV4; 248 sk->sk_gso_type = SKB_GSO_TCPV4;
247 sk_setup_caps(sk, &rt->dst); 249 sk_setup_caps(sk, &rt->dst);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 406f320336e6..dfa5beb0c1c8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2162 if (!tp->retrans_stamp) 2162 if (!tp->retrans_stamp)
2163 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2163 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2164 2164
2165 tp->undo_retrans++; 2165 tp->undo_retrans += tcp_skb_pcount(skb);
2166 2166
2167 /* snd_nxt is stored to detect loss of retransmitted segment, 2167 /* snd_nxt is stored to detect loss of retransmitted segment,
2168 * see tcp_input.c tcp_sacktag_write_queue(). 2168 * see tcp_input.c tcp_sacktag_write_queue().
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d37baaa1dbe3..c9a73e5b26a3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -663,75 +663,72 @@ void udp_flush_pending_frames(struct sock *sk)
663EXPORT_SYMBOL(udp_flush_pending_frames); 663EXPORT_SYMBOL(udp_flush_pending_frames);
664 664
665/** 665/**
666 * udp4_hwcsum_outgoing - handle outgoing HW checksumming 666 * udp4_hwcsum - handle outgoing HW checksumming
667 * @sk: socket we are sending on
668 * @skb: sk_buff containing the filled-in UDP header 667 * @skb: sk_buff containing the filled-in UDP header
669 * (checksum field must be zeroed out) 668 * (checksum field must be zeroed out)
669 * @src: source IP address
670 * @dst: destination IP address
670 */ 671 */
671static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, 672static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
672 __be32 src, __be32 dst, int len)
673{ 673{
674 unsigned int offset;
675 struct udphdr *uh = udp_hdr(skb); 674 struct udphdr *uh = udp_hdr(skb);
675 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
676 int offset = skb_transport_offset(skb);
677 int len = skb->len - offset;
678 int hlen = len;
676 __wsum csum = 0; 679 __wsum csum = 0;
677 680
678 if (skb_queue_len(&sk->sk_write_queue) == 1) { 681 if (!frags) {
679 /* 682 /*
680 * Only one fragment on the socket. 683 * Only one fragment on the socket.
681 */ 684 */
682 skb->csum_start = skb_transport_header(skb) - skb->head; 685 skb->csum_start = skb_transport_header(skb) - skb->head;
683 skb->csum_offset = offsetof(struct udphdr, check); 686 skb->csum_offset = offsetof(struct udphdr, check);
684 uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); 687 uh->check = ~csum_tcpudp_magic(src, dst, len,
688 IPPROTO_UDP, 0);
685 } else { 689 } else {
686 /* 690 /*
687 * HW-checksum won't work as there are two or more 691 * HW-checksum won't work as there are two or more
688 * fragments on the socket so that all csums of sk_buffs 692 * fragments on the socket so that all csums of sk_buffs
689 * should be together 693 * should be together
690 */ 694 */
691 offset = skb_transport_offset(skb); 695 do {
692 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 696 csum = csum_add(csum, frags->csum);
697 hlen -= frags->len;
698 } while ((frags = frags->next));
693 699
700 csum = skb_checksum(skb, offset, hlen, csum);
694 skb->ip_summed = CHECKSUM_NONE; 701 skb->ip_summed = CHECKSUM_NONE;
695 702
696 skb_queue_walk(&sk->sk_write_queue, skb) {
697 csum = csum_add(csum, skb->csum);
698 }
699
700 uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); 703 uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
701 if (uh->check == 0) 704 if (uh->check == 0)
702 uh->check = CSUM_MANGLED_0; 705 uh->check = CSUM_MANGLED_0;
703 } 706 }
704} 707}
705 708
706/* 709static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
707 * Push out all pending data as one UDP datagram. Socket is locked.
708 */
709static int udp_push_pending_frames(struct sock *sk)
710{ 710{
711 struct udp_sock *up = udp_sk(sk); 711 struct sock *sk = skb->sk;
712 struct inet_sock *inet = inet_sk(sk); 712 struct inet_sock *inet = inet_sk(sk);
713 struct flowi *fl = &inet->cork.fl;
714 struct sk_buff *skb;
715 struct udphdr *uh; 713 struct udphdr *uh;
714 struct rtable *rt = (struct rtable *)skb_dst(skb);
716 int err = 0; 715 int err = 0;
717 int is_udplite = IS_UDPLITE(sk); 716 int is_udplite = IS_UDPLITE(sk);
717 int offset = skb_transport_offset(skb);
718 int len = skb->len - offset;
718 __wsum csum = 0; 719 __wsum csum = 0;
719 720
720 /* Grab the skbuff where UDP header space exists. */
721 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
722 goto out;
723
724 /* 721 /*
725 * Create a UDP header 722 * Create a UDP header
726 */ 723 */
727 uh = udp_hdr(skb); 724 uh = udp_hdr(skb);
728 uh->source = fl->fl_ip_sport; 725 uh->source = inet->inet_sport;
729 uh->dest = fl->fl_ip_dport; 726 uh->dest = dport;
730 uh->len = htons(up->len); 727 uh->len = htons(len);
731 uh->check = 0; 728 uh->check = 0;
732 729
733 if (is_udplite) /* UDP-Lite */ 730 if (is_udplite) /* UDP-Lite */
734 csum = udplite_csum_outgoing(sk, skb); 731 csum = udplite_csum(skb);
735 732
736 else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ 733 else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
737 734
@@ -740,20 +737,20 @@ static int udp_push_pending_frames(struct sock *sk)
740 737
741 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 738 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
742 739
743 udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len); 740 udp4_hwcsum(skb, rt->rt_src, daddr);
744 goto send; 741 goto send;
745 742
746 } else /* `normal' UDP */ 743 } else
747 csum = udp_csum_outgoing(sk, skb); 744 csum = udp_csum(skb);
748 745
749 /* add protocol-dependent pseudo-header */ 746 /* add protocol-dependent pseudo-header */
750 uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len, 747 uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len,
751 sk->sk_protocol, csum); 748 sk->sk_protocol, csum);
752 if (uh->check == 0) 749 if (uh->check == 0)
753 uh->check = CSUM_MANGLED_0; 750 uh->check = CSUM_MANGLED_0;
754 751
755send: 752send:
756 err = ip_push_pending_frames(sk); 753 err = ip_send_skb(skb);
757 if (err) { 754 if (err) {
758 if (err == -ENOBUFS && !inet->recverr) { 755 if (err == -ENOBUFS && !inet->recverr) {
759 UDP_INC_STATS_USER(sock_net(sk), 756 UDP_INC_STATS_USER(sock_net(sk),
@@ -763,6 +760,26 @@ send:
763 } else 760 } else
764 UDP_INC_STATS_USER(sock_net(sk), 761 UDP_INC_STATS_USER(sock_net(sk),
765 UDP_MIB_OUTDATAGRAMS, is_udplite); 762 UDP_MIB_OUTDATAGRAMS, is_udplite);
763 return err;
764}
765
766/*
767 * Push out all pending data as one UDP datagram. Socket is locked.
768 */
769static int udp_push_pending_frames(struct sock *sk)
770{
771 struct udp_sock *up = udp_sk(sk);
772 struct inet_sock *inet = inet_sk(sk);
773 struct flowi *fl = &inet->cork.fl;
774 struct sk_buff *skb;
775 int err = 0;
776
777 skb = ip_finish_skb(sk);
778 if (!skb)
779 goto out;
780
781 err = udp_send_skb(skb, fl->fl4_dst, fl->fl_ip_dport);
782
766out: 783out:
767 up->len = 0; 784 up->len = 0;
768 up->pending = 0; 785 up->pending = 0;
@@ -785,6 +802,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
785 int err, is_udplite = IS_UDPLITE(sk); 802 int err, is_udplite = IS_UDPLITE(sk);
786 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 803 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
787 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 804 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
805 struct sk_buff *skb;
788 806
789 if (len > 0xFFFF) 807 if (len > 0xFFFF)
790 return -EMSGSIZE; 808 return -EMSGSIZE;
@@ -799,6 +817,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
799 ipc.opt = NULL; 817 ipc.opt = NULL;
800 ipc.tx_flags = 0; 818 ipc.tx_flags = 0;
801 819
820 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
821
802 if (up->pending) { 822 if (up->pending) {
803 /* 823 /*
804 * There are pending frames. 824 * There are pending frames.
@@ -894,14 +914,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
894 .fl4_src = saddr, 914 .fl4_src = saddr,
895 .fl4_tos = tos, 915 .fl4_tos = tos,
896 .proto = sk->sk_protocol, 916 .proto = sk->sk_protocol,
897 .flags = inet_sk_flowi_flags(sk), 917 .flags = (inet_sk_flowi_flags(sk) |
918 FLOWI_FLAG_CAN_SLEEP),
898 .fl_ip_sport = inet->inet_sport, 919 .fl_ip_sport = inet->inet_sport,
899 .fl_ip_dport = dport }; 920 .fl_ip_dport = dport
921 };
900 struct net *net = sock_net(sk); 922 struct net *net = sock_net(sk);
901 923
902 security_sk_classify_flow(sk, &fl); 924 security_sk_classify_flow(sk, &fl);
903 err = ip_route_output_flow(net, &rt, &fl, sk, 1); 925 rt = ip_route_output_flow(net, &fl, sk);
904 if (err) { 926 if (IS_ERR(rt)) {
927 err = PTR_ERR(rt);
928 rt = NULL;
905 if (err == -ENETUNREACH) 929 if (err == -ENETUNREACH)
906 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 930 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
907 goto out; 931 goto out;
@@ -923,6 +947,17 @@ back_from_confirm:
923 if (!ipc.addr) 947 if (!ipc.addr)
924 daddr = ipc.addr = rt->rt_dst; 948 daddr = ipc.addr = rt->rt_dst;
925 949
950 /* Lockless fast path for the non-corking case. */
951 if (!corkreq) {
952 skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
953 sizeof(struct udphdr), &ipc, &rt,
954 msg->msg_flags);
955 err = PTR_ERR(skb);
956 if (skb && !IS_ERR(skb))
957 err = udp_send_skb(skb, daddr, dport);
958 goto out;
959 }
960
926 lock_sock(sk); 961 lock_sock(sk);
927 if (unlikely(up->pending)) { 962 if (unlikely(up->pending)) {
928 /* The socket is already corked while preparing it. */ 963 /* The socket is already corked while preparing it. */
@@ -944,7 +979,6 @@ back_from_confirm:
944 979
945do_append_data: 980do_append_data:
946 up->len += ulen; 981 up->len += ulen;
947 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
948 err = ip_append_data(sk, getfrag, msg->msg_iov, ulen, 982 err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
949 sizeof(struct udphdr), &ipc, &rt, 983 sizeof(struct udphdr), &ipc, &rt,
950 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 984 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 63aa88efdcef..c70c42e7e77b 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -26,18 +26,16 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
26 .fl4_dst = daddr->a4, 26 .fl4_dst = daddr->a4,
27 .fl4_tos = tos, 27 .fl4_tos = tos,
28 }; 28 };
29 struct dst_entry *dst;
30 struct rtable *rt; 29 struct rtable *rt;
31 int err;
32 30
33 if (saddr) 31 if (saddr)
34 fl.fl4_src = saddr->a4; 32 fl.fl4_src = saddr->a4;
35 33
36 err = __ip_route_output_key(net, &rt, &fl); 34 rt = __ip_route_output_key(net, &fl);
37 dst = &rt->dst; 35 if (!IS_ERR(rt))
38 if (err) 36 return &rt->dst;
39 dst = ERR_PTR(err); 37
40 return dst; 38 return ERR_CAST(rt);
41} 39}
42 40
43static int xfrm4_get_saddr(struct net *net, 41static int xfrm4_get_saddr(struct net *net,
@@ -72,7 +70,12 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
72{ 70{
73 struct rtable *rt = (struct rtable *)xdst->route; 71 struct rtable *rt = (struct rtable *)xdst->route;
74 72
75 xdst->u.rt.fl = *fl; 73 rt->rt_key_dst = fl->fl4_dst;
74 rt->rt_key_src = fl->fl4_src;
75 rt->rt_tos = fl->fl4_tos;
76 rt->rt_iif = fl->iif;
77 rt->rt_oif = fl->oif;
78 rt->rt_mark = fl->mark;
76 79
77 xdst->u.dst.dev = dev; 80 xdst->u.dst.dev = dev;
78 dev_hold(dev); 81 dev_hold(dev);
@@ -234,6 +237,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
234 .get_tos = xfrm4_get_tos, 237 .get_tos = xfrm4_get_tos,
235 .init_path = xfrm4_init_path, 238 .init_path = xfrm4_init_path,
236 .fill_dst = xfrm4_fill_dst, 239 .fill_dst = xfrm4_fill_dst,
240 .blackhole_route = ipv4_blackhole_route,
237}; 241};
238 242
239#ifdef CONFIG_SYSCTL 243#ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3194aa909872..a88b2e9d25f1 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -644,9 +644,8 @@ EXPORT_SYMBOL(inet6_unregister_protosw);
644 644
645int inet6_sk_rebuild_header(struct sock *sk) 645int inet6_sk_rebuild_header(struct sock *sk)
646{ 646{
647 int err;
648 struct dst_entry *dst;
649 struct ipv6_pinfo *np = inet6_sk(sk); 647 struct ipv6_pinfo *np = inet6_sk(sk);
648 struct dst_entry *dst;
650 649
651 dst = __sk_dst_check(sk, np->dst_cookie); 650 dst = __sk_dst_check(sk, np->dst_cookie);
652 651
@@ -668,17 +667,11 @@ int inet6_sk_rebuild_header(struct sock *sk)
668 667
669 final_p = fl6_update_dst(&fl, np->opt, &final); 668 final_p = fl6_update_dst(&fl, np->opt, &final);
670 669
671 err = ip6_dst_lookup(sk, &dst, &fl); 670 dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
672 if (err) { 671 if (IS_ERR(dst)) {
673 sk->sk_route_caps = 0; 672 sk->sk_route_caps = 0;
674 return err; 673 sk->sk_err_soft = -PTR_ERR(dst);
675 } 674 return PTR_ERR(dst);
676 if (final_p)
677 ipv6_addr_copy(&fl.fl6_dst, final_p);
678
679 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
680 sk->sk_err_soft = -err;
681 return err;
682 } 675 }
683 676
684 __ip6_dst_store(sk, dst, NULL, NULL); 677 __ip6_dst_store(sk, dst, NULL, NULL);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 320bdb877eed..be3a781c0085 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -162,18 +162,11 @@ ipv4_connected:
162 opt = flowlabel ? flowlabel->opt : np->opt; 162 opt = flowlabel ? flowlabel->opt : np->opt;
163 final_p = fl6_update_dst(&fl, opt, &final); 163 final_p = fl6_update_dst(&fl, opt, &final);
164 164
165 err = ip6_dst_lookup(sk, &dst, &fl); 165 dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
166 if (err) 166 err = 0;
167 if (IS_ERR(dst)) {
168 err = PTR_ERR(dst);
167 goto out; 169 goto out;
168 if (final_p)
169 ipv6_addr_copy(&fl.fl6_dst, final_p);
170
171 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
172 if (err < 0) {
173 if (err == -EREMOTE)
174 err = ip6_dst_blackhole(sk, &dst, &fl);
175 if (err < 0)
176 goto out;
177 } 170 }
178 171
179 /* source address lookup done in ip6_dst_lookup */ 172 /* source address lookup done in ip6_dst_lookup */
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index a31d91b04c87..55665956b3a8 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -300,6 +300,68 @@ static void mip6_addr_swap(struct sk_buff *skb)
300static inline void mip6_addr_swap(struct sk_buff *skb) {} 300static inline void mip6_addr_swap(struct sk_buff *skb) {}
301#endif 301#endif
302 302
303static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
304 struct sock *sk, struct flowi *fl)
305{
306 struct dst_entry *dst, *dst2;
307 struct flowi fl2;
308 int err;
309
310 err = ip6_dst_lookup(sk, &dst, fl);
311 if (err)
312 return ERR_PTR(err);
313
314 /*
315 * We won't send icmp if the destination is known
316 * anycast.
317 */
318 if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
319 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
320 dst_release(dst);
321 return ERR_PTR(-EINVAL);
322 }
323
324 /* No need to clone since we're just using its address. */
325 dst2 = dst;
326
327 dst = xfrm_lookup(net, dst, fl, sk, 0);
328 if (!IS_ERR(dst)) {
329 if (dst != dst2)
330 return dst;
331 } else {
332 if (PTR_ERR(dst) == -EPERM)
333 dst = NULL;
334 else
335 return dst;
336 }
337
338 err = xfrm_decode_session_reverse(skb, &fl2, AF_INET6);
339 if (err)
340 goto relookup_failed;
341
342 err = ip6_dst_lookup(sk, &dst2, &fl2);
343 if (err)
344 goto relookup_failed;
345
346 dst2 = xfrm_lookup(net, dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
347 if (!IS_ERR(dst2)) {
348 dst_release(dst);
349 dst = dst2;
350 } else {
351 err = PTR_ERR(dst2);
352 if (err == -EPERM) {
353 dst_release(dst);
354 return dst2;
355 } else
356 goto relookup_failed;
357 }
358
359relookup_failed:
360 if (dst)
361 return dst;
362 return ERR_PTR(err);
363}
364
303/* 365/*
304 * Send an ICMP message in response to a packet in error 366 * Send an ICMP message in response to a packet in error
305 */ 367 */
@@ -312,10 +374,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
312 struct ipv6_pinfo *np; 374 struct ipv6_pinfo *np;
313 struct in6_addr *saddr = NULL; 375 struct in6_addr *saddr = NULL;
314 struct dst_entry *dst; 376 struct dst_entry *dst;
315 struct dst_entry *dst2;
316 struct icmp6hdr tmp_hdr; 377 struct icmp6hdr tmp_hdr;
317 struct flowi fl; 378 struct flowi fl;
318 struct flowi fl2;
319 struct icmpv6_msg msg; 379 struct icmpv6_msg msg;
320 int iif = 0; 380 int iif = 0;
321 int addr_type = 0; 381 int addr_type = 0;
@@ -408,57 +468,10 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
408 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) 468 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
409 fl.oif = np->mcast_oif; 469 fl.oif = np->mcast_oif;
410 470
411 err = ip6_dst_lookup(sk, &dst, &fl); 471 dst = icmpv6_route_lookup(net, skb, sk, &fl);
412 if (err) 472 if (IS_ERR(dst))
413 goto out; 473 goto out;
414 474
415 /*
416 * We won't send icmp if the destination is known
417 * anycast.
418 */
419 if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
420 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
421 goto out_dst_release;
422 }
423
424 /* No need to clone since we're just using its address. */
425 dst2 = dst;
426
427 err = xfrm_lookup(net, &dst, &fl, sk, 0);
428 switch (err) {
429 case 0:
430 if (dst != dst2)
431 goto route_done;
432 break;
433 case -EPERM:
434 dst = NULL;
435 break;
436 default:
437 goto out;
438 }
439
440 if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
441 goto relookup_failed;
442
443 if (ip6_dst_lookup(sk, &dst2, &fl2))
444 goto relookup_failed;
445
446 err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
447 switch (err) {
448 case 0:
449 dst_release(dst);
450 dst = dst2;
451 break;
452 case -EPERM:
453 goto out_dst_release;
454 default:
455relookup_failed:
456 if (!dst)
457 goto out;
458 break;
459 }
460
461route_done:
462 if (ipv6_addr_is_multicast(&fl.fl6_dst)) 475 if (ipv6_addr_is_multicast(&fl.fl6_dst))
463 hlimit = np->mcast_hops; 476 hlimit = np->mcast_hops;
464 else 477 else
@@ -545,7 +558,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
545 err = ip6_dst_lookup(sk, &dst, &fl); 558 err = ip6_dst_lookup(sk, &dst, &fl);
546 if (err) 559 if (err)
547 goto out; 560 goto out;
548 if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) 561 dst = xfrm_lookup(net, dst, &fl, sk, 0);
562 if (IS_ERR(dst))
549 goto out; 563 goto out;
550 564
551 if (ipv6_addr_is_multicast(&fl.fl6_dst)) 565 if (ipv6_addr_is_multicast(&fl.fl6_dst))
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index d144e629d2b4..d687e1397333 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -74,13 +74,8 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
74 fl.fl_ip_sport = inet_rsk(req)->loc_port; 74 fl.fl_ip_sport = inet_rsk(req)->loc_port;
75 security_req_classify_flow(req, &fl); 75 security_req_classify_flow(req, &fl);
76 76
77 if (ip6_dst_lookup(sk, &dst, &fl)) 77 dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
78 return NULL; 78 if (IS_ERR(dst))
79
80 if (final_p)
81 ipv6_addr_copy(&fl.fl6_dst, final_p);
82
83 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
84 return NULL; 79 return NULL;
85 80
86 return dst; 81 return dst;
@@ -234,21 +229,13 @@ int inet6_csk_xmit(struct sk_buff *skb)
234 dst = __inet6_csk_dst_check(sk, np->dst_cookie); 229 dst = __inet6_csk_dst_check(sk, np->dst_cookie);
235 230
236 if (dst == NULL) { 231 if (dst == NULL) {
237 int err = ip6_dst_lookup(sk, &dst, &fl); 232 dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
238
239 if (err) {
240 sk->sk_err_soft = -err;
241 kfree_skb(skb);
242 return err;
243 }
244
245 if (final_p)
246 ipv6_addr_copy(&fl.fl6_dst, final_p);
247 233
248 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) { 234 if (IS_ERR(dst)) {
235 sk->sk_err_soft = -PTR_ERR(dst);
249 sk->sk_route_caps = 0; 236 sk->sk_route_caps = 0;
250 kfree_skb(skb); 237 kfree_skb(skb);
251 return err; 238 return PTR_ERR(dst);
252 } 239 }
253 240
254 __inet6_csk_dst_store(sk, dst, NULL, NULL); 241 __inet6_csk_dst_store(sk, dst, NULL, NULL);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 25a2647f9942..adaffaf84555 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1002,29 +1002,71 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
1002EXPORT_SYMBOL_GPL(ip6_dst_lookup); 1002EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1003 1003
1004/** 1004/**
1005 * ip6_sk_dst_lookup - perform socket cached route lookup on flow 1005 * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1006 * @sk: socket which provides route info
1007 * @fl: flow to lookup
1008 * @final_dst: final destination address for ipsec lookup
1009 * @can_sleep: we are in a sleepable context
1010 *
1011 * This function performs a route lookup on the given flow.
1012 *
1013 * It returns a valid dst pointer on success, or a pointer encoded
1014 * error code.
1015 */
1016struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi *fl,
1017 const struct in6_addr *final_dst,
1018 bool can_sleep)
1019{
1020 struct dst_entry *dst = NULL;
1021 int err;
1022
1023 err = ip6_dst_lookup_tail(sk, &dst, fl);
1024 if (err)
1025 return ERR_PTR(err);
1026 if (final_dst)
1027 ipv6_addr_copy(&fl->fl6_dst, final_dst);
1028 if (can_sleep)
1029 fl->flags |= FLOWI_FLAG_CAN_SLEEP;
1030
1031 return xfrm_lookup(sock_net(sk), dst, fl, sk, 0);
1032}
1033EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1034
1035/**
1036 * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1006 * @sk: socket which provides the dst cache and route info 1037 * @sk: socket which provides the dst cache and route info
1007 * @dst: pointer to dst_entry * for result
1008 * @fl: flow to lookup 1038 * @fl: flow to lookup
1039 * @final_dst: final destination address for ipsec lookup
1040 * @can_sleep: we are in a sleepable context
1009 * 1041 *
1010 * This function performs a route lookup on the given flow with the 1042 * This function performs a route lookup on the given flow with the
1011 * possibility of using the cached route in the socket if it is valid. 1043 * possibility of using the cached route in the socket if it is valid.
1012 * It will take the socket dst lock when operating on the dst cache. 1044 * It will take the socket dst lock when operating on the dst cache.
1013 * As a result, this function can only be used in process context. 1045 * As a result, this function can only be used in process context.
1014 * 1046 *
1015 * It returns zero on success, or a standard errno code on error. 1047 * It returns a valid dst pointer on success, or a pointer encoded
1048 * error code.
1016 */ 1049 */
1017int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 1050struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi *fl,
1051 const struct in6_addr *final_dst,
1052 bool can_sleep)
1018{ 1053{
1019 *dst = NULL; 1054 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1020 if (sk) { 1055 int err;
1021 *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1022 *dst = ip6_sk_dst_check(sk, *dst, fl);
1023 }
1024 1056
1025 return ip6_dst_lookup_tail(sk, dst, fl); 1057 dst = ip6_sk_dst_check(sk, dst, fl);
1058
1059 err = ip6_dst_lookup_tail(sk, &dst, fl);
1060 if (err)
1061 return ERR_PTR(err);
1062 if (final_dst)
1063 ipv6_addr_copy(&fl->fl6_dst, final_dst);
1064 if (can_sleep)
1065 fl->flags |= FLOWI_FLAG_CAN_SLEEP;
1066
1067 return xfrm_lookup(sock_net(sk), dst, fl, sk, 0);
1026} 1068}
1027EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup); 1069EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1028 1070
1029static inline int ip6_ufo_append_data(struct sock *sk, 1071static inline int ip6_ufo_append_data(struct sock *sk,
1030 int getfrag(void *from, char *to, int offset, int len, 1072 int getfrag(void *from, char *to, int offset, int len,
@@ -1061,7 +1103,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1061 1103
1062 skb->ip_summed = CHECKSUM_PARTIAL; 1104 skb->ip_summed = CHECKSUM_PARTIAL;
1063 skb->csum = 0; 1105 skb->csum = 0;
1064 sk->sk_sndmsg_off = 0;
1065 } 1106 }
1066 1107
1067 err = skb_append_datato_frags(sk,skb, getfrag, from, 1108 err = skb_append_datato_frags(sk,skb, getfrag, from,
@@ -1118,6 +1159,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1118 int err; 1159 int err;
1119 int offset = 0; 1160 int offset = 0;
1120 int csummode = CHECKSUM_NONE; 1161 int csummode = CHECKSUM_NONE;
1162 __u8 tx_flags = 0;
1121 1163
1122 if (flags&MSG_PROBE) 1164 if (flags&MSG_PROBE)
1123 return 0; 1165 return 0;
@@ -1202,6 +1244,13 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1202 } 1244 }
1203 } 1245 }
1204 1246
1247 /* For UDP, check if TX timestamp is enabled */
1248 if (sk->sk_type == SOCK_DGRAM) {
1249 err = sock_tx_timestamp(sk, &tx_flags);
1250 if (err)
1251 goto error;
1252 }
1253
1205 /* 1254 /*
1206 * Let's try using as much space as possible. 1255 * Let's try using as much space as possible.
1207 * Use MTU if total length of the message fits into the MTU. 1256 * Use MTU if total length of the message fits into the MTU.
@@ -1306,6 +1355,12 @@ alloc_new_skb:
1306 sk->sk_allocation); 1355 sk->sk_allocation);
1307 if (unlikely(skb == NULL)) 1356 if (unlikely(skb == NULL))
1308 err = -ENOBUFS; 1357 err = -ENOBUFS;
1358 else {
1359 /* Only the initial fragment
1360 * is time stamped.
1361 */
1362 tx_flags = 0;
1363 }
1309 } 1364 }
1310 if (skb == NULL) 1365 if (skb == NULL)
1311 goto error; 1366 goto error;
@@ -1317,6 +1372,9 @@ alloc_new_skb:
1317 /* reserve for fragmentation */ 1372 /* reserve for fragmentation */
1318 skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); 1373 skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
1319 1374
1375 if (sk->sk_type == SOCK_DGRAM)
1376 skb_shinfo(skb)->tx_flags = tx_flags;
1377
1320 /* 1378 /*
1321 * Find where to start putting bytes 1379 * Find where to start putting bytes
1322 */ 1380 */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4f4483e697bd..02730ef26b0f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -581,7 +581,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
581 fl.fl4_dst = eiph->saddr; 581 fl.fl4_dst = eiph->saddr;
582 fl.fl4_tos = RT_TOS(eiph->tos); 582 fl.fl4_tos = RT_TOS(eiph->tos);
583 fl.proto = IPPROTO_IPIP; 583 fl.proto = IPPROTO_IPIP;
584 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) 584 rt = ip_route_output_key(dev_net(skb->dev), &fl);
585 if (IS_ERR(rt))
585 goto out; 586 goto out;
586 587
587 skb2->dev = rt->dst.dev; 588 skb2->dev = rt->dst.dev;
@@ -593,12 +594,14 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
593 fl.fl4_dst = eiph->daddr; 594 fl.fl4_dst = eiph->daddr;
594 fl.fl4_src = eiph->saddr; 595 fl.fl4_src = eiph->saddr;
595 fl.fl4_tos = eiph->tos; 596 fl.fl4_tos = eiph->tos;
596 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || 597 rt = ip_route_output_key(dev_net(skb->dev), &fl);
598 if (IS_ERR(rt) ||
597 rt->dst.dev->type != ARPHRD_TUNNEL) { 599 rt->dst.dev->type != ARPHRD_TUNNEL) {
598 ip_rt_put(rt); 600 if (!IS_ERR(rt))
601 ip_rt_put(rt);
599 goto out; 602 goto out;
600 } 603 }
601 skb_dst_set(skb2, (struct dst_entry *)rt); 604 skb_dst_set(skb2, &rt->dst);
602 } else { 605 } else {
603 ip_rt_put(rt); 606 ip_rt_put(rt);
604 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, 607 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
@@ -903,8 +906,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
903 else { 906 else {
904 dst = ip6_route_output(net, NULL, fl); 907 dst = ip6_route_output(net, NULL, fl);
905 908
906 if (dst->error || xfrm_lookup(net, &dst, fl, NULL, 0) < 0) 909 if (dst->error)
907 goto tx_err_link_failure; 910 goto tx_err_link_failure;
911 dst = xfrm_lookup(net, dst, fl, NULL, 0);
912 if (IS_ERR(dst)) {
913 err = PTR_ERR(dst);
914 dst = NULL;
915 goto tx_err_link_failure;
916 }
908 } 917 }
909 918
910 tdev = dst->dev; 919 tdev = dst->dev;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 49f986d626a0..f2c9b6930ffc 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -319,7 +319,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
319{ 319{
320 struct in6_addr *source, *group; 320 struct in6_addr *source, *group;
321 struct ipv6_mc_socklist *pmc; 321 struct ipv6_mc_socklist *pmc;
322 struct net_device *dev;
323 struct inet6_dev *idev; 322 struct inet6_dev *idev;
324 struct ipv6_pinfo *inet6 = inet6_sk(sk); 323 struct ipv6_pinfo *inet6 = inet6_sk(sk);
325 struct ip6_sf_socklist *psl; 324 struct ip6_sf_socklist *psl;
@@ -341,7 +340,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
341 rcu_read_unlock(); 340 rcu_read_unlock();
342 return -ENODEV; 341 return -ENODEV;
343 } 342 }
344 dev = idev->dev;
345 343
346 err = -EADDRNOTAVAIL; 344 err = -EADDRNOTAVAIL;
347 345
@@ -455,7 +453,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
455{ 453{
456 struct in6_addr *group; 454 struct in6_addr *group;
457 struct ipv6_mc_socklist *pmc; 455 struct ipv6_mc_socklist *pmc;
458 struct net_device *dev;
459 struct inet6_dev *idev; 456 struct inet6_dev *idev;
460 struct ipv6_pinfo *inet6 = inet6_sk(sk); 457 struct ipv6_pinfo *inet6 = inet6_sk(sk);
461 struct ip6_sf_socklist *newpsl, *psl; 458 struct ip6_sf_socklist *newpsl, *psl;
@@ -478,7 +475,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
478 rcu_read_unlock(); 475 rcu_read_unlock();
479 return -ENODEV; 476 return -ENODEV;
480 } 477 }
481 dev = idev->dev;
482 478
483 err = 0; 479 err = 0;
484 480
@@ -549,7 +545,6 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
549 struct in6_addr *group; 545 struct in6_addr *group;
550 struct ipv6_mc_socklist *pmc; 546 struct ipv6_mc_socklist *pmc;
551 struct inet6_dev *idev; 547 struct inet6_dev *idev;
552 struct net_device *dev;
553 struct ipv6_pinfo *inet6 = inet6_sk(sk); 548 struct ipv6_pinfo *inet6 = inet6_sk(sk);
554 struct ip6_sf_socklist *psl; 549 struct ip6_sf_socklist *psl;
555 struct net *net = sock_net(sk); 550 struct net *net = sock_net(sk);
@@ -566,7 +561,6 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
566 rcu_read_unlock(); 561 rcu_read_unlock();
567 return -ENODEV; 562 return -ENODEV;
568 } 563 }
569 dev = idev->dev;
570 564
571 err = -EADDRNOTAVAIL; 565 err = -EADDRNOTAVAIL;
572 /* 566 /*
@@ -1429,7 +1423,12 @@ static void mld_sendpack(struct sk_buff *skb)
1429 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 1423 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1430 skb->dev->ifindex); 1424 skb->dev->ifindex);
1431 1425
1432 err = xfrm_lookup(net, &dst, &fl, NULL, 0); 1426 dst = xfrm_lookup(net, dst, &fl, NULL, 0);
1427 err = 0;
1428 if (IS_ERR(dst)) {
1429 err = PTR_ERR(dst);
1430 dst = NULL;
1431 }
1433 skb_dst_set(skb, dst); 1432 skb_dst_set(skb, dst);
1434 if (err) 1433 if (err)
1435 goto err_out; 1434 goto err_out;
@@ -1796,9 +1795,11 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1796 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 1795 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1797 skb->dev->ifindex); 1796 skb->dev->ifindex);
1798 1797
1799 err = xfrm_lookup(net, &dst, &fl, NULL, 0); 1798 dst = xfrm_lookup(net, dst, &fl, NULL, 0);
1800 if (err) 1799 if (IS_ERR(dst)) {
1800 err = PTR_ERR(dst);
1801 goto err_out; 1801 goto err_out;
1802 }
1802 1803
1803 skb_dst_set(skb, dst); 1804 skb_dst_set(skb, dst);
1804 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1805 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7254ce364006..9360d3be94f0 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -529,8 +529,8 @@ void ndisc_send_skb(struct sk_buff *skb,
529 return; 529 return;
530 } 530 }
531 531
532 err = xfrm_lookup(net, &dst, &fl, NULL, 0); 532 dst = xfrm_lookup(net, dst, &fl, NULL, 0);
533 if (err < 0) { 533 if (IS_ERR(dst)) {
534 kfree_skb(skb); 534 kfree_skb(skb);
535 return; 535 return;
536 } 536 }
@@ -1542,8 +1542,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1542 if (dst == NULL) 1542 if (dst == NULL)
1543 return; 1543 return;
1544 1544
1545 err = xfrm_lookup(net, &dst, &fl, NULL, 0); 1545 dst = xfrm_lookup(net, dst, &fl, NULL, 0);
1546 if (err) 1546 if (IS_ERR(dst))
1547 return; 1547 return;
1548 1548
1549 rt = (struct rt6_info *) dst; 1549 rt = (struct rt6_info *) dst;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 35915e8617f0..8d74116ae27d 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -39,7 +39,8 @@ int ip6_route_me_harder(struct sk_buff *skb)
39 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 39 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
40 xfrm_decode_session(skb, &fl, AF_INET6) == 0) { 40 xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
41 skb_dst_set(skb, NULL); 41 skb_dst_set(skb, NULL);
42 if (xfrm_lookup(net, &dst, &fl, skb->sk, 0)) 42 dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
43 if (IS_ERR(dst))
43 return -1; 44 return -1;
44 skb_dst_set(skb, dst); 45 skb_dst_set(skb, dst);
45 } 46 }
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index bf998feac14e..91f6a61cefab 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -101,7 +101,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
101 dst_release(dst); 101 dst_release(dst);
102 return; 102 return;
103 } 103 }
104 if (xfrm_lookup(net, &dst, &fl, NULL, 0)) 104 dst = xfrm_lookup(net, dst, &fl, NULL, 0);
105 if (IS_ERR(dst))
105 return; 106 return;
106 107
107 hh_len = (dst->dev->hard_header_len + 15)&~15; 108 hh_len = (dst->dev->hard_header_len + 15)&~15;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 364e86683388..dc29b07caf42 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -856,20 +856,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
856 fl.oif = np->mcast_oif; 856 fl.oif = np->mcast_oif;
857 security_sk_classify_flow(sk, &fl); 857 security_sk_classify_flow(sk, &fl);
858 858
859 err = ip6_dst_lookup(sk, &dst, &fl); 859 dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
860 if (err) 860 if (IS_ERR(dst)) {
861 err = PTR_ERR(dst);
861 goto out; 862 goto out;
862 if (final_p)
863 ipv6_addr_copy(&fl.fl6_dst, final_p);
864
865 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
866 if (err < 0) {
867 if (err == -EREMOTE)
868 err = ip6_dst_blackhole(sk, &dst, &fl);
869 if (err < 0)
870 goto out;
871 } 863 }
872
873 if (hlimit < 0) { 864 if (hlimit < 0) {
874 if (ipv6_addr_is_multicast(&fl.fl6_dst)) 865 if (ipv6_addr_is_multicast(&fl.fl6_dst))
875 hlimit = np->mcast_hops; 866 hlimit = np->mcast_hops;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7e9443f835f9..d55d00c2a824 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -870,11 +870,10 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
870 870
871EXPORT_SYMBOL(ip6_route_output); 871EXPORT_SYMBOL(ip6_route_output);
872 872
873int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl) 873struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
874{ 874{
875 struct rt6_info *ort = (struct rt6_info *) *dstp; 875 struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1);
876 struct rt6_info *rt = (struct rt6_info *) 876 struct rt6_info *ort = (struct rt6_info *) dst_orig;
877 dst_alloc(&ip6_dst_blackhole_ops, 1);
878 struct dst_entry *new = NULL; 877 struct dst_entry *new = NULL;
879 878
880 if (rt) { 879 if (rt) {
@@ -905,11 +904,9 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
905 dst_free(new); 904 dst_free(new);
906 } 905 }
907 906
908 dst_release(*dstp); 907 dst_release(dst_orig);
909 *dstp = new; 908 return new ? new : ERR_PTR(-ENOMEM);
910 return new ? 0 : -ENOMEM;
911} 909}
912EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
913 910
914/* 911/*
915 * Destination cache support functions 912 * Destination cache support functions
@@ -2025,12 +2022,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2025 if (IS_ERR(neigh)) { 2022 if (IS_ERR(neigh)) {
2026 dst_free(&rt->dst); 2023 dst_free(&rt->dst);
2027 2024
2028 /* We are casting this because that is the return 2025 return ERR_CAST(neigh);
2029 * value type. But an errno encoded pointer is the
2030 * same regardless of the underlying pointer type,
2031 * and that's what we are returning. So this is OK.
2032 */
2033 return (struct rt6_info *) neigh;
2034 } 2026 }
2035 rt->rt6i_nexthop = neigh; 2027 rt->rt6i_nexthop = neigh;
2036 2028
@@ -2602,14 +2594,16 @@ static
2602int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, 2594int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2603 void __user *buffer, size_t *lenp, loff_t *ppos) 2595 void __user *buffer, size_t *lenp, loff_t *ppos)
2604{ 2596{
2605 struct net *net = current->nsproxy->net_ns; 2597 struct net *net;
2606 int delay = net->ipv6.sysctl.flush_delay; 2598 int delay;
2607 if (write) { 2599 if (!write)
2608 proc_dointvec(ctl, write, buffer, lenp, ppos);
2609 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2610 return 0;
2611 } else
2612 return -EINVAL; 2600 return -EINVAL;
2601
2602 net = (struct net *)ctl->extra1;
2603 delay = net->ipv6.sysctl.flush_delay;
2604 proc_dointvec(ctl, write, buffer, lenp, ppos);
2605 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2606 return 0;
2613} 2607}
2614 2608
2615ctl_table ipv6_route_table_template[] = { 2609ctl_table ipv6_route_table_template[] = {
@@ -2696,6 +2690,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2696 2690
2697 if (table) { 2691 if (table) {
2698 table[0].data = &net->ipv6.sysctl.flush_delay; 2692 table[0].data = &net->ipv6.sysctl.flush_delay;
2693 table[0].extra1 = net;
2699 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 2694 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2700 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2695 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2701 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 2696 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b1599a345c10..b8c8adbd7cf6 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -738,7 +738,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
738 .fl4_tos = RT_TOS(tos), 738 .fl4_tos = RT_TOS(tos),
739 .oif = tunnel->parms.link, 739 .oif = tunnel->parms.link,
740 .proto = IPPROTO_IPV6 }; 740 .proto = IPPROTO_IPV6 };
741 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 741 rt = ip_route_output_key(dev_net(dev), &fl);
742 if (IS_ERR(rt)) {
742 dev->stats.tx_carrier_errors++; 743 dev->stats.tx_carrier_errors++;
743 goto tx_error_icmp; 744 goto tx_error_icmp;
744 } 745 }
@@ -862,8 +863,9 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
862 .fl4_tos = RT_TOS(iph->tos), 863 .fl4_tos = RT_TOS(iph->tos),
863 .oif = tunnel->parms.link, 864 .oif = tunnel->parms.link,
864 .proto = IPPROTO_IPV6 }; 865 .proto = IPPROTO_IPV6 };
865 struct rtable *rt; 866 struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
866 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 867
868 if (!IS_ERR(rt)) {
867 tdev = rt->dst.dev; 869 tdev = rt->dst.dev;
868 ip_rt_put(rt); 870 ip_rt_put(rt);
869 } 871 }
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 09fd34f0dbf2..0b4cf350631b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -243,12 +243,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
243 fl.fl_ip_dport = inet_rsk(req)->rmt_port; 243 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
244 fl.fl_ip_sport = inet_sk(sk)->inet_sport; 244 fl.fl_ip_sport = inet_sk(sk)->inet_sport;
245 security_req_classify_flow(req, &fl); 245 security_req_classify_flow(req, &fl);
246 if (ip6_dst_lookup(sk, &dst, &fl))
247 goto out_free;
248 246
249 if (final_p) 247 dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
250 ipv6_addr_copy(&fl.fl6_dst, final_p); 248 if (IS_ERR(dst))
251 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
252 goto out_free; 249 goto out_free;
253 } 250 }
254 251
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1d0ab5570904..e59a31c48baf 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -255,18 +255,10 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
255 255
256 security_sk_classify_flow(sk, &fl); 256 security_sk_classify_flow(sk, &fl);
257 257
258 err = ip6_dst_lookup(sk, &dst, &fl); 258 dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
259 if (err) 259 if (IS_ERR(dst)) {
260 err = PTR_ERR(dst);
260 goto failure; 261 goto failure;
261 if (final_p)
262 ipv6_addr_copy(&fl.fl6_dst, final_p);
263
264 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
265 if (err < 0) {
266 if (err == -EREMOTE)
267 err = ip6_dst_blackhole(sk, &dst, &fl);
268 if (err < 0)
269 goto failure;
270 } 262 }
271 263
272 if (saddr == NULL) { 264 if (saddr == NULL) {
@@ -385,7 +377,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
385 np = inet6_sk(sk); 377 np = inet6_sk(sk);
386 378
387 if (type == ICMPV6_PKT_TOOBIG) { 379 if (type == ICMPV6_PKT_TOOBIG) {
388 struct dst_entry *dst = NULL; 380 struct dst_entry *dst;
389 381
390 if (sock_owned_by_user(sk)) 382 if (sock_owned_by_user(sk))
391 goto out; 383 goto out;
@@ -413,13 +405,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
413 fl.fl_ip_sport = inet->inet_sport; 405 fl.fl_ip_sport = inet->inet_sport;
414 security_skb_classify_flow(skb, &fl); 406 security_skb_classify_flow(skb, &fl);
415 407
416 if ((err = ip6_dst_lookup(sk, &dst, &fl))) { 408 dst = ip6_dst_lookup_flow(sk, &fl, NULL, false);
417 sk->sk_err_soft = -err; 409 if (IS_ERR(dst)) {
418 goto out; 410 sk->sk_err_soft = -PTR_ERR(dst);
419 }
420
421 if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
422 sk->sk_err_soft = -err;
423 goto out; 411 goto out;
424 } 412 }
425 413
@@ -496,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
496 struct in6_addr * final_p, final; 484 struct in6_addr * final_p, final;
497 struct flowi fl; 485 struct flowi fl;
498 struct dst_entry *dst; 486 struct dst_entry *dst;
499 int err = -1; 487 int err;
500 488
501 memset(&fl, 0, sizeof(fl)); 489 memset(&fl, 0, sizeof(fl));
502 fl.proto = IPPROTO_TCP; 490 fl.proto = IPPROTO_TCP;
@@ -512,15 +500,13 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
512 opt = np->opt; 500 opt = np->opt;
513 final_p = fl6_update_dst(&fl, opt, &final); 501 final_p = fl6_update_dst(&fl, opt, &final);
514 502
515 err = ip6_dst_lookup(sk, &dst, &fl); 503 dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
516 if (err) 504 if (IS_ERR(dst)) {
505 err = PTR_ERR(dst);
517 goto done; 506 goto done;
518 if (final_p) 507 }
519 ipv6_addr_copy(&fl.fl6_dst, final_p);
520 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
521 goto done;
522
523 skb = tcp_make_synack(sk, dst, req, rvp); 508 skb = tcp_make_synack(sk, dst, req, rvp);
509 err = -ENOMEM;
524 if (skb) { 510 if (skb) {
525 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 511 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
526 512
@@ -1079,15 +1065,14 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1079 * Underlying function will use this to retrieve the network 1065 * Underlying function will use this to retrieve the network
1080 * namespace 1066 * namespace
1081 */ 1067 */
1082 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { 1068 dst = ip6_dst_lookup_flow(ctl_sk, &fl, NULL, false);
1083 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { 1069 if (!IS_ERR(dst)) {
1084 skb_dst_set(buff, dst); 1070 skb_dst_set(buff, dst);
1085 ip6_xmit(ctl_sk, buff, &fl, NULL); 1071 ip6_xmit(ctl_sk, buff, &fl, NULL);
1086 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 1072 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1087 if (rst) 1073 if (rst)
1088 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 1074 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1089 return; 1075 return;
1090 }
1091 } 1076 }
1092 1077
1093 kfree_skb(buff); 1078 kfree_skb(buff);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a419a787eb69..d86d7f67a597 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1125,18 +1125,11 @@ do_udp_sendmsg:
1125 1125
1126 security_sk_classify_flow(sk, &fl); 1126 security_sk_classify_flow(sk, &fl);
1127 1127
1128 err = ip6_sk_dst_lookup(sk, &dst, &fl); 1128 dst = ip6_sk_dst_lookup_flow(sk, &fl, final_p, true);
1129 if (err) 1129 if (IS_ERR(dst)) {
1130 err = PTR_ERR(dst);
1131 dst = NULL;
1130 goto out; 1132 goto out;
1131 if (final_p)
1132 ipv6_addr_copy(&fl.fl6_dst, final_p);
1133
1134 err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
1135 if (err < 0) {
1136 if (err == -EREMOTE)
1137 err = ip6_dst_blackhole(sk, &dst, &fl);
1138 if (err < 0)
1139 goto out;
1140 } 1133 }
1141 1134
1142 if (hlimit < 0) { 1135 if (hlimit < 0) {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index c128ca1affe3..48ce496802fd 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -274,6 +274,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
274 .get_tos = xfrm6_get_tos, 274 .get_tos = xfrm6_get_tos,
275 .init_path = xfrm6_init_path, 275 .init_path = xfrm6_init_path,
276 .fill_dst = xfrm6_fill_dst, 276 .fill_dst = xfrm6_fill_dst,
277 .blackhole_route = ip6_blackhole_route,
277}; 278};
278 279
279static int __init xfrm6_policy_init(void) 280static int __init xfrm6_policy_init(void)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7fb54577f5bd..7db86ffcf070 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2560,7 +2560,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2560} 2560}
2561#else 2561#else
2562static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, 2562static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2563 struct sadb_msg *hdr, void **ext_hdrs) 2563 const struct sadb_msg *hdr, void * const *ext_hdrs)
2564{ 2564{
2565 return -ENOPROTOOPT; 2565 return -ENOPROTOOPT;
2566} 2566}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 110efb704c9b..2a698ff89db6 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -320,11 +320,12 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
320 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 320 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
321 goto out; 321 goto out;
322 322
323 rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr, 323 rt = ip_route_connect(lsa->l2tp_addr.s_addr, saddr,
324 RT_CONN_FLAGS(sk), oif, 324 RT_CONN_FLAGS(sk), oif,
325 IPPROTO_L2TP, 325 IPPROTO_L2TP,
326 0, 0, sk, 1); 326 0, 0, sk, true);
327 if (rc) { 327 if (IS_ERR(rt)) {
328 rc = PTR_ERR(rt);
328 if (rc == -ENETUNREACH) 329 if (rc == -ENETUNREACH)
329 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); 330 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
330 goto out; 331 goto out;
@@ -489,7 +490,8 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
489 * itself out. 490 * itself out.
490 */ 491 */
491 security_sk_classify_flow(sk, &fl); 492 security_sk_classify_flow(sk, &fl);
492 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) 493 rt = ip_route_output_flow(sock_net(sk), &fl, sk);
494 if (IS_ERR(rt))
493 goto no_route; 495 goto no_route;
494 } 496 }
495 sk_setup_caps(sk, &rt->dst); 497 sk_setup_caps(sk, &rt->dst);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index f99687439139..058f1e9a9128 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -181,25 +181,26 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
181 * LLC functionality 181 * LLC functionality
182 */ 182 */
183 rcv = rcu_dereference(sap->rcv_func); 183 rcv = rcu_dereference(sap->rcv_func);
184 if (rcv) {
185 struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
186 if (cskb)
187 rcv(cskb, dev, pt, orig_dev);
188 }
189 dest = llc_pdu_type(skb); 184 dest = llc_pdu_type(skb);
190 if (unlikely(!dest || !llc_type_handlers[dest - 1])) 185 if (unlikely(!dest || !llc_type_handlers[dest - 1])) {
191 goto drop_put; 186 if (rcv)
192 llc_type_handlers[dest - 1](sap, skb); 187 rcv(skb, dev, pt, orig_dev);
193out_put: 188 else
189 kfree_skb(skb);
190 } else {
191 if (rcv) {
192 struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
193 if (cskb)
194 rcv(cskb, dev, pt, orig_dev);
195 }
196 llc_type_handlers[dest - 1](sap, skb);
197 }
194 llc_sap_put(sap); 198 llc_sap_put(sap);
195out: 199out:
196 return 0; 200 return 0;
197drop: 201drop:
198 kfree_skb(skb); 202 kfree_skb(skb);
199 goto out; 203 goto out;
200drop_put:
201 kfree_skb(skb);
202 goto out_put;
203handle_station: 204handle_station:
204 if (!llc_station_handler) 205 if (!llc_station_handler)
205 goto drop; 206 goto drop;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index dbf5e4006bc1..513f85cc2ae1 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -78,7 +78,7 @@ config MAC80211_RC_DEFAULT
78endif 78endif
79 79
80comment "Some wireless drivers require a rate control algorithm" 80comment "Some wireless drivers require a rate control algorithm"
81 depends on MAC80211_HAS_RC=n 81 depends on MAC80211 && MAC80211_HAS_RC=n
82 82
83config MAC80211_MESH 83config MAC80211_MESH
84 bool "Enable mac80211 mesh networking (pre-802.11s) support" 84 bool "Enable mac80211 mesh networking (pre-802.11s) support"
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 140503d4c97a..7b701dcddb50 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -316,6 +316,17 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
316 return 0; 316 return 0;
317} 317}
318 318
319static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx)
320{
321 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
322 struct ieee80211_supported_band *sband;
323 sband = sta->local->hw.wiphy->bands[
324 sta->local->hw.conf.channel->band];
325 rate->legacy = sband->bitrates[idx].bitrate;
326 } else
327 rate->mcs = idx;
328}
329
319static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 330static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
320{ 331{
321 struct ieee80211_sub_if_data *sdata = sta->sdata; 332 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -330,6 +341,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
330 STATION_INFO_TX_RETRIES | 341 STATION_INFO_TX_RETRIES |
331 STATION_INFO_TX_FAILED | 342 STATION_INFO_TX_FAILED |
332 STATION_INFO_TX_BITRATE | 343 STATION_INFO_TX_BITRATE |
344 STATION_INFO_RX_BITRATE |
333 STATION_INFO_RX_DROP_MISC; 345 STATION_INFO_RX_DROP_MISC;
334 346
335 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); 347 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
@@ -355,15 +367,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
355 sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; 367 sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
356 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI) 368 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI)
357 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 369 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
370 rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx);
358 371
359 if (!(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)) { 372 sinfo->rxrate.flags = 0;
360 struct ieee80211_supported_band *sband; 373 if (sta->last_rx_rate_flag & RX_FLAG_HT)
361 sband = sta->local->hw.wiphy->bands[ 374 sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS;
362 sta->local->hw.conf.channel->band]; 375 if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
363 sinfo->txrate.legacy = 376 sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
364 sband->bitrates[sta->last_tx_rate.idx].bitrate; 377 if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
365 } else 378 sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
366 sinfo->txrate.mcs = sta->last_tx_rate.idx; 379 rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx);
367 380
368 if (ieee80211_vif_is_mesh(&sdata->vif)) { 381 if (ieee80211_vif_is_mesh(&sdata->vif)) {
369#ifdef CONFIG_MAC80211_MESH 382#ifdef CONFIG_MAC80211_MESH
@@ -1800,6 +1813,33 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1800 1813
1801 *cookie = (unsigned long) skb; 1814 *cookie = (unsigned long) skb;
1802 1815
1816 if (is_offchan && local->ops->offchannel_tx) {
1817 int ret;
1818
1819 IEEE80211_SKB_CB(skb)->band = chan->band;
1820
1821 mutex_lock(&local->mtx);
1822
1823 if (local->hw_offchan_tx_cookie) {
1824 mutex_unlock(&local->mtx);
1825 return -EBUSY;
1826 }
1827
1828 /* TODO: bitrate control, TX processing? */
1829 ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
1830
1831 if (ret == 0)
1832 local->hw_offchan_tx_cookie = *cookie;
1833 mutex_unlock(&local->mtx);
1834
1835 /*
1836 * Allow driver to return 1 to indicate it wants to have the
1837 * frame transmitted with a remain_on_channel + regular TX.
1838 */
1839 if (ret != 1)
1840 return ret;
1841 }
1842
1803 if (is_offchan && local->ops->remain_on_channel) { 1843 if (is_offchan && local->ops->remain_on_channel) {
1804 unsigned int duration; 1844 unsigned int duration;
1805 int ret; 1845 int ret;
@@ -1886,6 +1926,18 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
1886 1926
1887 mutex_lock(&local->mtx); 1927 mutex_lock(&local->mtx);
1888 1928
1929 if (local->ops->offchannel_tx_cancel_wait &&
1930 local->hw_offchan_tx_cookie == cookie) {
1931 ret = drv_offchannel_tx_cancel_wait(local);
1932
1933 if (!ret)
1934 local->hw_offchan_tx_cookie = 0;
1935
1936 mutex_unlock(&local->mtx);
1937
1938 return ret;
1939 }
1940
1889 if (local->ops->cancel_remain_on_channel) { 1941 if (local->ops->cancel_remain_on_channel) {
1890 cookie ^= 2; 1942 cookie ^= 2;
1891 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie); 1943 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 78af32d4bc58..3729296f6f95 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -5,9 +5,9 @@
5#include "ieee80211_i.h" 5#include "ieee80211_i.h"
6#include "driver-trace.h" 6#include "driver-trace.h"
7 7
8static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb) 8static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
9{ 9{
10 return local->ops->tx(&local->hw, skb); 10 local->ops->tx(&local->hw, skb);
11} 11}
12 12
13static inline int drv_start(struct ieee80211_local *local) 13static inline int drv_start(struct ieee80211_local *local)
@@ -495,4 +495,35 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
495 return ret; 495 return ret;
496} 496}
497 497
498static inline int drv_offchannel_tx(struct ieee80211_local *local,
499 struct sk_buff *skb,
500 struct ieee80211_channel *chan,
501 enum nl80211_channel_type channel_type,
502 unsigned int wait)
503{
504 int ret;
505
506 might_sleep();
507
508 trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
509 ret = local->ops->offchannel_tx(&local->hw, skb, chan,
510 channel_type, wait);
511 trace_drv_return_int(local, ret);
512
513 return ret;
514}
515
516static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
517{
518 int ret;
519
520 might_sleep();
521
522 trace_drv_offchannel_tx_cancel_wait(local);
523 ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
524 trace_drv_return_int(local, ret);
525
526 return ret;
527}
528
498#endif /* __MAC80211_DRIVER_OPS */ 529#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index e5cce19a7d65..520fe2444893 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -884,6 +884,39 @@ DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel,
884 TP_ARGS(local) 884 TP_ARGS(local)
885); 885);
886 886
887TRACE_EVENT(drv_offchannel_tx,
888 TP_PROTO(struct ieee80211_local *local, struct sk_buff *skb,
889 struct ieee80211_channel *chan,
890 enum nl80211_channel_type channel_type,
891 unsigned int wait),
892
893 TP_ARGS(local, skb, chan, channel_type, wait),
894
895 TP_STRUCT__entry(
896 LOCAL_ENTRY
897 __field(int, center_freq)
898 __field(int, channel_type)
899 __field(unsigned int, wait)
900 ),
901
902 TP_fast_assign(
903 LOCAL_ASSIGN;
904 __entry->center_freq = chan->center_freq;
905 __entry->channel_type = channel_type;
906 __entry->wait = wait;
907 ),
908
909 TP_printk(
910 LOCAL_PR_FMT " freq:%dMHz, wait:%dms",
911 LOCAL_PR_ARG, __entry->center_freq, __entry->wait
912 )
913);
914
915DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
916 TP_PROTO(struct ieee80211_local *local),
917 TP_ARGS(local)
918);
919
887/* 920/*
888 * Tracing for API calls that drivers call. 921 * Tracing for API calls that drivers call.
889 */ 922 */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 75d679d75e63..b9e4b9bd2179 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -66,6 +66,9 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
66 /* own MCS TX capabilities */ 66 /* own MCS TX capabilities */
67 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; 67 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;
68 68
69 /* Copy peer MCS TX capabilities, the driver might need them. */
70 ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params;
71
69 /* can we TX with MCS rates? */ 72 /* can we TX with MCS rates? */
70 if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) 73 if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
71 return; 74 return;
@@ -79,7 +82,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
79 max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS; 82 max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS;
80 83
81 /* 84 /*
82 * 802.11n D5.0 20.3.5 / 20.6 says: 85 * 802.11n-2009 20.3.5 / 20.6 says:
83 * - indices 0 to 7 and 32 are single spatial stream 86 * - indices 0 to 7 and 32 are single spatial stream
84 * - 8 to 31 are multiple spatial streams using equal modulation 87 * - 8 to 31 are multiple spatial streams using equal modulation
85 * [8..15 for two streams, 16..23 for three and 24..31 for four] 88 * [8..15 for two streams, 16..23 for three and 24..31 for four]
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a42aa61269ea..3e81af1fce58 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -31,7 +31,6 @@
31#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) 31#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
32 32
33#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) 33#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
34#define IEEE80211_IBSS_MERGE_DELAY 0x400000
35#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) 34#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
36 35
37#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 36#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
@@ -355,7 +354,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
355 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) 354 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
356 goto put_bss; 355 goto put_bss;
357 356
358 if (rx_status->flag & RX_FLAG_TSFT) { 357 if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
359 /* 358 /*
360 * For correct IBSS merging we need mactime; since mactime is 359 * For correct IBSS merging we need mactime; since mactime is
361 * defined as the time the first data symbol of the frame hits 360 * defined as the time the first data symbol of the frame hits
@@ -397,10 +396,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
397 jiffies); 396 jiffies);
398#endif 397#endif
399 398
400 /* give slow hardware some time to do the TSF sync */
401 if (rx_timestamp < IEEE80211_IBSS_MERGE_DELAY)
402 goto put_bss;
403
404 if (beacon_timestamp > rx_timestamp) { 399 if (beacon_timestamp > rx_timestamp) {
405#ifdef CONFIG_MAC80211_IBSS_DEBUG 400#ifdef CONFIG_MAC80211_IBSS_DEBUG
406 printk(KERN_DEBUG "%s: beacon TSF higher than " 401 printk(KERN_DEBUG "%s: beacon TSF higher than "
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 0a570a111a84..a40401701424 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -957,6 +957,7 @@ struct ieee80211_local {
957 unsigned int hw_roc_duration; 957 unsigned int hw_roc_duration;
958 u32 hw_roc_cookie; 958 u32 hw_roc_cookie;
959 bool hw_roc_for_tx; 959 bool hw_roc_for_tx;
960 unsigned long hw_offchan_tx_cookie;
960 961
961 /* dummy netdev for use w/ NAPI */ 962 /* dummy netdev for use w/ NAPI */
962 struct net_device napi_dev; 963 struct net_device napi_dev;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5a4e19b88032..4054399be907 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1236,6 +1236,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1236 } 1236 }
1237 mutex_unlock(&local->iflist_mtx); 1237 mutex_unlock(&local->iflist_mtx);
1238 unregister_netdevice_many(&unreg_list); 1238 unregister_netdevice_many(&unreg_list);
1239 list_del(&unreg_list);
1239} 1240}
1240 1241
1241static u32 ieee80211_idle_off(struct ieee80211_local *local, 1242static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7b3f9df725bd..cc984bd861cf 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -145,6 +145,9 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
145{ 145{
146 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 146 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
147 147
148 if (unlikely(!sdata->u.mgd.associated))
149 return;
150
148 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) 151 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
149 return; 152 return;
150 153
@@ -738,9 +741,19 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
738 return; 741 return;
739 742
740 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 743 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
741 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) 744 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
745 netif_tx_stop_all_queues(sdata->dev);
746 /*
747 * Flush all the frames queued in the driver before
748 * going to power save
749 */
750 drv_flush(local, false);
742 ieee80211_send_nullfunc(local, sdata, 1); 751 ieee80211_send_nullfunc(local, sdata, 1);
743 752
753 /* Flush once again to get the tx status of nullfunc frame */
754 drv_flush(local, false);
755 }
756
744 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) && 757 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
745 (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) || 758 (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
746 (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { 759 (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
@@ -748,6 +761,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
748 local->hw.conf.flags |= IEEE80211_CONF_PS; 761 local->hw.conf.flags |= IEEE80211_CONF_PS;
749 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 762 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
750 } 763 }
764
765 netif_tx_start_all_queues(sdata->dev);
751} 766}
752 767
753void ieee80211_dynamic_ps_timer(unsigned long data) 768void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -1071,12 +1086,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1071 if (is_multicast_ether_addr(hdr->addr1)) 1086 if (is_multicast_ether_addr(hdr->addr1))
1072 return; 1087 return;
1073 1088
1074 /*
1075 * In case we receive frames after disassociation.
1076 */
1077 if (!sdata->u.mgd.associated)
1078 return;
1079
1080 ieee80211_sta_reset_conn_monitor(sdata); 1089 ieee80211_sta_reset_conn_monitor(sdata);
1081} 1090}
1082 1091
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f502634d43af..5c1930ba8ebe 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -77,7 +77,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
77 /* always present fields */ 77 /* always present fields */
78 len = sizeof(struct ieee80211_radiotap_header) + 9; 78 len = sizeof(struct ieee80211_radiotap_header) + 9;
79 79
80 if (status->flag & RX_FLAG_TSFT) 80 if (status->flag & RX_FLAG_MACTIME_MPDU)
81 len += 8; 81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1; 83 len += 1;
@@ -123,7 +123,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
123 /* the order of the following fields is important */ 123 /* the order of the following fields is important */
124 124
125 /* IEEE80211_RADIOTAP_TSFT */ 125 /* IEEE80211_RADIOTAP_TSFT */
126 if (status->flag & RX_FLAG_TSFT) { 126 if (status->flag & RX_FLAG_MACTIME_MPDU) {
127 put_unaligned_le64(status->mactime, pos); 127 put_unaligned_le64(status->mactime, pos);
128 rthdr->it_present |= 128 rthdr->it_present |=
129 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 129 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
@@ -1156,14 +1156,23 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1156 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { 1156 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1157 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 1157 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1158 NL80211_IFTYPE_ADHOC); 1158 NL80211_IFTYPE_ADHOC);
1159 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) 1159 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
1160 sta->last_rx = jiffies; 1160 sta->last_rx = jiffies;
1161 if (ieee80211_is_data(hdr->frame_control)) {
1162 sta->last_rx_rate_idx = status->rate_idx;
1163 sta->last_rx_rate_flag = status->flag;
1164 }
1165 }
1161 } else if (!is_multicast_ether_addr(hdr->addr1)) { 1166 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1162 /* 1167 /*
1163 * Mesh beacons will update last_rx when if they are found to 1168 * Mesh beacons will update last_rx when if they are found to
1164 * match the current local configuration when processed. 1169 * match the current local configuration when processed.
1165 */ 1170 */
1166 sta->last_rx = jiffies; 1171 sta->last_rx = jiffies;
1172 if (ieee80211_is_data(hdr->frame_control)) {
1173 sta->last_rx_rate_idx = status->rate_idx;
1174 sta->last_rx_rate_flag = status->flag;
1175 }
1167 } 1176 }
1168 1177
1169 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1178 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ca0b69060ef7..57681149e37f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -209,6 +209,8 @@ enum plink_state {
209 * @rate_ctrl_priv: rate control private per-STA pointer 209 * @rate_ctrl_priv: rate control private per-STA pointer
210 * @last_tx_rate: rate used for last transmit, to report to userspace as 210 * @last_tx_rate: rate used for last transmit, to report to userspace as
211 * "the" transmit rate 211 * "the" transmit rate
212 * @last_rx_rate_idx: rx status rate index of the last data packet
213 * @last_rx_rate_flag: rx status flag of the last data packet
212 * @lock: used for locking all fields that require locking, see comments 214 * @lock: used for locking all fields that require locking, see comments
213 * in the header file. 215 * in the header file.
214 * @flaglock: spinlock for flags accesses 216 * @flaglock: spinlock for flags accesses
@@ -311,6 +313,8 @@ struct sta_info {
311 unsigned long tx_bytes; 313 unsigned long tx_bytes;
312 unsigned long tx_fragments; 314 unsigned long tx_fragments;
313 struct ieee80211_tx_rate last_tx_rate; 315 struct ieee80211_tx_rate last_tx_rate;
316 int last_rx_rate_idx;
317 int last_rx_rate_flag;
314 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; 318 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
315 319
316 /* 320 /*
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 010a559bd872..b936dd29e92b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -318,8 +318,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
318 if (info->flags & IEEE80211_TX_STAT_ACK) { 318 if (info->flags & IEEE80211_TX_STAT_ACK) {
319 local->ps_sdata->u.mgd.flags |= 319 local->ps_sdata->u.mgd.flags |=
320 IEEE80211_STA_NULLFUNC_ACKED; 320 IEEE80211_STA_NULLFUNC_ACKED;
321 ieee80211_queue_work(&local->hw,
322 &local->dynamic_ps_enable_work);
323 } else 321 } else
324 mod_timer(&local->dynamic_ps_timer, jiffies + 322 mod_timer(&local->dynamic_ps_timer, jiffies +
325 msecs_to_jiffies(10)); 323 msecs_to_jiffies(10));
@@ -343,6 +341,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
343 cookie = local->hw_roc_cookie ^ 2; 341 cookie = local->hw_roc_cookie ^ 2;
344 local->hw_roc_skb_for_status = NULL; 342 local->hw_roc_skb_for_status = NULL;
345 } 343 }
344
345 if (cookie == local->hw_offchan_tx_cookie)
346 local->hw_offchan_tx_cookie = 0;
347
346 cfg80211_mgmt_tx_status( 348 cfg80211_mgmt_tx_status(
347 skb->dev, cookie, skb->data, skb->len, 349 skb->dev, cookie, skb->data, skb->len,
348 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); 350 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 34edf7f22b0e..081dcaf6577b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -33,10 +33,6 @@
33#include "wme.h" 33#include "wme.h"
34#include "rate.h" 34#include "rate.h"
35 35
36#define IEEE80211_TX_OK 0
37#define IEEE80211_TX_AGAIN 1
38#define IEEE80211_TX_PENDING 2
39
40/* misc utils */ 36/* misc utils */
41 37
42static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, 38static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
@@ -1285,16 +1281,17 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1285 return TX_CONTINUE; 1281 return TX_CONTINUE;
1286} 1282}
1287 1283
1288static int __ieee80211_tx(struct ieee80211_local *local, 1284/*
1289 struct sk_buff **skbp, 1285 * Returns false if the frame couldn't be transmitted but was queued instead.
1290 struct sta_info *sta, 1286 */
1291 bool txpending) 1287static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
1288 struct sta_info *sta, bool txpending)
1292{ 1289{
1293 struct sk_buff *skb = *skbp, *next; 1290 struct sk_buff *skb = *skbp, *next;
1294 struct ieee80211_tx_info *info; 1291 struct ieee80211_tx_info *info;
1295 struct ieee80211_sub_if_data *sdata; 1292 struct ieee80211_sub_if_data *sdata;
1296 unsigned long flags; 1293 unsigned long flags;
1297 int ret, len; 1294 int len;
1298 bool fragm = false; 1295 bool fragm = false;
1299 1296
1300 while (skb) { 1297 while (skb) {
@@ -1302,13 +1299,37 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1302 __le16 fc; 1299 __le16 fc;
1303 1300
1304 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 1301 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1305 ret = IEEE80211_TX_OK;
1306 if (local->queue_stop_reasons[q] || 1302 if (local->queue_stop_reasons[q] ||
1307 (!txpending && !skb_queue_empty(&local->pending[q]))) 1303 (!txpending && !skb_queue_empty(&local->pending[q]))) {
1308 ret = IEEE80211_TX_PENDING; 1304 /*
1305 * Since queue is stopped, queue up frames for later
1306 * transmission from the tx-pending tasklet when the
1307 * queue is woken again.
1308 */
1309
1310 do {
1311 next = skb->next;
1312 skb->next = NULL;
1313 /*
1314 * NB: If txpending is true, next must already
1315 * be NULL since we must've gone through this
1316 * loop before already; therefore we can just
1317 * queue the frame to the head without worrying
1318 * about reordering of fragments.
1319 */
1320 if (unlikely(txpending))
1321 __skb_queue_head(&local->pending[q],
1322 skb);
1323 else
1324 __skb_queue_tail(&local->pending[q],
1325 skb);
1326 } while ((skb = next));
1327
1328 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1329 flags);
1330 return false;
1331 }
1309 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1332 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1310 if (ret != IEEE80211_TX_OK)
1311 return ret;
1312 1333
1313 info = IEEE80211_SKB_CB(skb); 1334 info = IEEE80211_SKB_CB(skb);
1314 1335
@@ -1343,15 +1364,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1343 info->control.sta = NULL; 1364 info->control.sta = NULL;
1344 1365
1345 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 1366 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
1346 ret = drv_tx(local, skb); 1367 drv_tx(local, skb);
1347 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
1348 dev_kfree_skb(skb);
1349 ret = NETDEV_TX_OK;
1350 }
1351 if (ret != NETDEV_TX_OK) {
1352 info->control.vif = &sdata->vif;
1353 return IEEE80211_TX_AGAIN;
1354 }
1355 1368
1356 ieee80211_tpt_led_trig_tx(local, fc, len); 1369 ieee80211_tpt_led_trig_tx(local, fc, len);
1357 *skbp = skb = next; 1370 *skbp = skb = next;
@@ -1359,7 +1372,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1359 fragm = true; 1372 fragm = true;
1360 } 1373 }
1361 1374
1362 return IEEE80211_TX_OK; 1375 return true;
1363} 1376}
1364 1377
1365/* 1378/*
@@ -1419,23 +1432,24 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1419 return 0; 1432 return 0;
1420} 1433}
1421 1434
1422static void ieee80211_tx(struct ieee80211_sub_if_data *sdata, 1435/*
1436 * Returns false if the frame couldn't be transmitted but was queued instead.
1437 */
1438static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1423 struct sk_buff *skb, bool txpending) 1439 struct sk_buff *skb, bool txpending)
1424{ 1440{
1425 struct ieee80211_local *local = sdata->local; 1441 struct ieee80211_local *local = sdata->local;
1426 struct ieee80211_tx_data tx; 1442 struct ieee80211_tx_data tx;
1427 ieee80211_tx_result res_prepare; 1443 ieee80211_tx_result res_prepare;
1428 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1444 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1429 struct sk_buff *next;
1430 unsigned long flags;
1431 int ret, retries;
1432 u16 queue; 1445 u16 queue;
1446 bool result = true;
1433 1447
1434 queue = skb_get_queue_mapping(skb); 1448 queue = skb_get_queue_mapping(skb);
1435 1449
1436 if (unlikely(skb->len < 10)) { 1450 if (unlikely(skb->len < 10)) {
1437 dev_kfree_skb(skb); 1451 dev_kfree_skb(skb);
1438 return; 1452 return true;
1439 } 1453 }
1440 1454
1441 rcu_read_lock(); 1455 rcu_read_lock();
@@ -1445,85 +1459,19 @@ static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1445 1459
1446 if (unlikely(res_prepare == TX_DROP)) { 1460 if (unlikely(res_prepare == TX_DROP)) {
1447 dev_kfree_skb(skb); 1461 dev_kfree_skb(skb);
1448 rcu_read_unlock(); 1462 goto out;
1449 return;
1450 } else if (unlikely(res_prepare == TX_QUEUED)) { 1463 } else if (unlikely(res_prepare == TX_QUEUED)) {
1451 rcu_read_unlock(); 1464 goto out;
1452 return;
1453 } 1465 }
1454 1466
1455 tx.channel = local->hw.conf.channel; 1467 tx.channel = local->hw.conf.channel;
1456 info->band = tx.channel->band; 1468 info->band = tx.channel->band;
1457 1469
1458 if (invoke_tx_handlers(&tx)) 1470 if (!invoke_tx_handlers(&tx))
1459 goto out; 1471 result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
1460
1461 retries = 0;
1462 retry:
1463 ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
1464 switch (ret) {
1465 case IEEE80211_TX_OK:
1466 break;
1467 case IEEE80211_TX_AGAIN:
1468 /*
1469 * Since there are no fragmented frames on A-MPDU
1470 * queues, there's no reason for a driver to reject
1471 * a frame there, warn and drop it.
1472 */
1473 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
1474 goto drop;
1475 /* fall through */
1476 case IEEE80211_TX_PENDING:
1477 skb = tx.skb;
1478
1479 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1480
1481 if (local->queue_stop_reasons[queue] ||
1482 !skb_queue_empty(&local->pending[queue])) {
1483 /*
1484 * if queue is stopped, queue up frames for later
1485 * transmission from the tasklet
1486 */
1487 do {
1488 next = skb->next;
1489 skb->next = NULL;
1490 if (unlikely(txpending))
1491 __skb_queue_head(&local->pending[queue],
1492 skb);
1493 else
1494 __skb_queue_tail(&local->pending[queue],
1495 skb);
1496 } while ((skb = next));
1497
1498 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1499 flags);
1500 } else {
1501 /*
1502 * otherwise retry, but this is a race condition or
1503 * a driver bug (which we warn about if it persists)
1504 */
1505 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1506 flags);
1507
1508 retries++;
1509 if (WARN(retries > 10, "tx refused but queue active\n"))
1510 goto drop;
1511 goto retry;
1512 }
1513 }
1514 out: 1472 out:
1515 rcu_read_unlock(); 1473 rcu_read_unlock();
1516 return; 1474 return result;
1517
1518 drop:
1519 rcu_read_unlock();
1520
1521 skb = tx.skb;
1522 while (skb) {
1523 next = skb->next;
1524 dev_kfree_skb(skb);
1525 skb = next;
1526 }
1527} 1475}
1528 1476
1529/* device xmit handlers */ 1477/* device xmit handlers */
@@ -2070,6 +2018,11 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local)
2070 skb_queue_purge(&local->pending[i]); 2018 skb_queue_purge(&local->pending[i]);
2071} 2019}
2072 2020
2021/*
2022 * Returns false if the frame couldn't be transmitted but was queued instead,
2023 * which in this case means re-queued -- take as an indication to stop sending
2024 * more pending frames.
2025 */
2073static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, 2026static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2074 struct sk_buff *skb) 2027 struct sk_buff *skb)
2075{ 2028{
@@ -2077,20 +2030,17 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2077 struct ieee80211_sub_if_data *sdata; 2030 struct ieee80211_sub_if_data *sdata;
2078 struct sta_info *sta; 2031 struct sta_info *sta;
2079 struct ieee80211_hdr *hdr; 2032 struct ieee80211_hdr *hdr;
2080 int ret; 2033 bool result;
2081 bool result = true;
2082 2034
2083 sdata = vif_to_sdata(info->control.vif); 2035 sdata = vif_to_sdata(info->control.vif);
2084 2036
2085 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { 2037 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
2086 ieee80211_tx(sdata, skb, true); 2038 result = ieee80211_tx(sdata, skb, true);
2087 } else { 2039 } else {
2088 hdr = (struct ieee80211_hdr *)skb->data; 2040 hdr = (struct ieee80211_hdr *)skb->data;
2089 sta = sta_info_get(sdata, hdr->addr1); 2041 sta = sta_info_get(sdata, hdr->addr1);
2090 2042
2091 ret = __ieee80211_tx(local, &skb, sta, true); 2043 result = __ieee80211_tx(local, &skb, sta, true);
2092 if (ret != IEEE80211_TX_OK)
2093 result = false;
2094 } 2044 }
2095 2045
2096 return result; 2046 return result;
@@ -2132,8 +2082,6 @@ void ieee80211_tx_pending(unsigned long data)
2132 flags); 2082 flags);
2133 2083
2134 txok = ieee80211_tx_pending_skb(local, skb); 2084 txok = ieee80211_tx_pending_skb(local, skb);
2135 if (!txok)
2136 __skb_queue_head(&local->pending[i], skb);
2137 spin_lock_irqsave(&local->queue_stop_reason_lock, 2085 spin_lock_irqsave(&local->queue_stop_reason_lock,
2138 flags); 2086 flags);
2139 if (!txok) 2087 if (!txok)
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
index 3b970d343023..2c5b348eb3a8 100644
--- a/net/netfilter/ipset/Kconfig
+++ b/net/netfilter/ipset/Kconfig
@@ -1,6 +1,7 @@
1menuconfig IP_SET 1menuconfig IP_SET
2 tristate "IP set support" 2 tristate "IP set support"
3 depends on INET && NETFILTER 3 depends on INET && NETFILTER
4 depends on NETFILTER_NETLINK
4 help 5 help
5 This option adds IP set support to the kernel. 6 This option adds IP set support to the kernel.
6 In order to define and use the sets, you need the userspace utility 7 In order to define and use the sets, you need the userspace utility
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 83233fe24a08..9c2a517b69c8 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -59,7 +59,7 @@ static int ip_vs_conn_tab_mask __read_mostly;
59/* 59/*
60 * Connection hash table: for input and output packets lookups of IPVS 60 * Connection hash table: for input and output packets lookups of IPVS
61 */ 61 */
62static struct list_head *ip_vs_conn_tab __read_mostly; 62static struct hlist_head *ip_vs_conn_tab __read_mostly;
63 63
64/* SLAB cache for IPVS connections */ 64/* SLAB cache for IPVS connections */
65static struct kmem_cache *ip_vs_conn_cachep __read_mostly; 65static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
@@ -201,7 +201,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
201 spin_lock(&cp->lock); 201 spin_lock(&cp->lock);
202 202
203 if (!(cp->flags & IP_VS_CONN_F_HASHED)) { 203 if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
204 list_add(&cp->c_list, &ip_vs_conn_tab[hash]); 204 hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
205 cp->flags |= IP_VS_CONN_F_HASHED; 205 cp->flags |= IP_VS_CONN_F_HASHED;
206 atomic_inc(&cp->refcnt); 206 atomic_inc(&cp->refcnt);
207 ret = 1; 207 ret = 1;
@@ -234,7 +234,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
234 spin_lock(&cp->lock); 234 spin_lock(&cp->lock);
235 235
236 if (cp->flags & IP_VS_CONN_F_HASHED) { 236 if (cp->flags & IP_VS_CONN_F_HASHED) {
237 list_del(&cp->c_list); 237 hlist_del(&cp->c_list);
238 cp->flags &= ~IP_VS_CONN_F_HASHED; 238 cp->flags &= ~IP_VS_CONN_F_HASHED;
239 atomic_dec(&cp->refcnt); 239 atomic_dec(&cp->refcnt);
240 ret = 1; 240 ret = 1;
@@ -259,12 +259,13 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
259{ 259{
260 unsigned hash; 260 unsigned hash;
261 struct ip_vs_conn *cp; 261 struct ip_vs_conn *cp;
262 struct hlist_node *n;
262 263
263 hash = ip_vs_conn_hashkey_param(p, false); 264 hash = ip_vs_conn_hashkey_param(p, false);
264 265
265 ct_read_lock(hash); 266 ct_read_lock(hash);
266 267
267 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 268 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
268 if (cp->af == p->af && 269 if (cp->af == p->af &&
269 p->cport == cp->cport && p->vport == cp->vport && 270 p->cport == cp->cport && p->vport == cp->vport &&
270 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && 271 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -345,12 +346,13 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
345{ 346{
346 unsigned hash; 347 unsigned hash;
347 struct ip_vs_conn *cp; 348 struct ip_vs_conn *cp;
349 struct hlist_node *n;
348 350
349 hash = ip_vs_conn_hashkey_param(p, false); 351 hash = ip_vs_conn_hashkey_param(p, false);
350 352
351 ct_read_lock(hash); 353 ct_read_lock(hash);
352 354
353 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 355 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
354 if (!ip_vs_conn_net_eq(cp, p->net)) 356 if (!ip_vs_conn_net_eq(cp, p->net))
355 continue; 357 continue;
356 if (p->pe_data && p->pe->ct_match) { 358 if (p->pe_data && p->pe->ct_match) {
@@ -394,6 +396,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
394{ 396{
395 unsigned hash; 397 unsigned hash;
396 struct ip_vs_conn *cp, *ret=NULL; 398 struct ip_vs_conn *cp, *ret=NULL;
399 struct hlist_node *n;
397 400
398 /* 401 /*
399 * Check for "full" addressed entries 402 * Check for "full" addressed entries
@@ -402,7 +405,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
402 405
403 ct_read_lock(hash); 406 ct_read_lock(hash);
404 407
405 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 408 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
406 if (cp->af == p->af && 409 if (cp->af == p->af &&
407 p->vport == cp->cport && p->cport == cp->dport && 410 p->vport == cp->cport && p->cport == cp->dport &&
408 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && 411 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -818,7 +821,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
818 return NULL; 821 return NULL;
819 } 822 }
820 823
821 INIT_LIST_HEAD(&cp->c_list); 824 INIT_HLIST_NODE(&cp->c_list);
822 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); 825 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
823 ip_vs_conn_net_set(cp, p->net); 826 ip_vs_conn_net_set(cp, p->net);
824 cp->af = p->af; 827 cp->af = p->af;
@@ -894,8 +897,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
894 */ 897 */
895#ifdef CONFIG_PROC_FS 898#ifdef CONFIG_PROC_FS
896struct ip_vs_iter_state { 899struct ip_vs_iter_state {
897 struct seq_net_private p; 900 struct seq_net_private p;
898 struct list_head *l; 901 struct hlist_head *l;
899}; 902};
900 903
901static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) 904static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
@@ -903,13 +906,14 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
903 int idx; 906 int idx;
904 struct ip_vs_conn *cp; 907 struct ip_vs_conn *cp;
905 struct ip_vs_iter_state *iter = seq->private; 908 struct ip_vs_iter_state *iter = seq->private;
909 struct hlist_node *n;
906 910
907 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { 911 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
908 ct_read_lock_bh(idx); 912 ct_read_lock_bh(idx);
909 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 913 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
910 if (pos-- == 0) { 914 if (pos-- == 0) {
911 iter->l = &ip_vs_conn_tab[idx]; 915 iter->l = &ip_vs_conn_tab[idx];
912 return cp; 916 return cp;
913 } 917 }
914 } 918 }
915 ct_read_unlock_bh(idx); 919 ct_read_unlock_bh(idx);
@@ -930,7 +934,8 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
930{ 934{
931 struct ip_vs_conn *cp = v; 935 struct ip_vs_conn *cp = v;
932 struct ip_vs_iter_state *iter = seq->private; 936 struct ip_vs_iter_state *iter = seq->private;
933 struct list_head *e, *l = iter->l; 937 struct hlist_node *e;
938 struct hlist_head *l = iter->l;
934 int idx; 939 int idx;
935 940
936 ++*pos; 941 ++*pos;
@@ -938,15 +943,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
938 return ip_vs_conn_array(seq, 0); 943 return ip_vs_conn_array(seq, 0);
939 944
940 /* more on same hash chain? */ 945 /* more on same hash chain? */
941 if ((e = cp->c_list.next) != l) 946 if ((e = cp->c_list.next))
942 return list_entry(e, struct ip_vs_conn, c_list); 947 return hlist_entry(e, struct ip_vs_conn, c_list);
943 948
944 idx = l - ip_vs_conn_tab; 949 idx = l - ip_vs_conn_tab;
945 ct_read_unlock_bh(idx); 950 ct_read_unlock_bh(idx);
946 951
947 while (++idx < ip_vs_conn_tab_size) { 952 while (++idx < ip_vs_conn_tab_size) {
948 ct_read_lock_bh(idx); 953 ct_read_lock_bh(idx);
949 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 954 hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
950 iter->l = &ip_vs_conn_tab[idx]; 955 iter->l = &ip_vs_conn_tab[idx];
951 return cp; 956 return cp;
952 } 957 }
@@ -959,7 +964,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
959static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v) 964static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
960{ 965{
961 struct ip_vs_iter_state *iter = seq->private; 966 struct ip_vs_iter_state *iter = seq->private;
962 struct list_head *l = iter->l; 967 struct hlist_head *l = iter->l;
963 968
964 if (l) 969 if (l)
965 ct_read_unlock_bh(l - ip_vs_conn_tab); 970 ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -1148,13 +1153,14 @@ void ip_vs_random_dropentry(struct net *net)
1148 */ 1153 */
1149 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { 1154 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
1150 unsigned hash = net_random() & ip_vs_conn_tab_mask; 1155 unsigned hash = net_random() & ip_vs_conn_tab_mask;
1156 struct hlist_node *n;
1151 1157
1152 /* 1158 /*
1153 * Lock is actually needed in this loop. 1159 * Lock is actually needed in this loop.
1154 */ 1160 */
1155 ct_write_lock_bh(hash); 1161 ct_write_lock_bh(hash);
1156 1162
1157 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 1163 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
1158 if (cp->flags & IP_VS_CONN_F_TEMPLATE) 1164 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
1159 /* connection template */ 1165 /* connection template */
1160 continue; 1166 continue;
@@ -1202,12 +1208,14 @@ static void ip_vs_conn_flush(struct net *net)
1202 1208
1203flush_again: 1209flush_again:
1204 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { 1210 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1211 struct hlist_node *n;
1212
1205 /* 1213 /*
1206 * Lock is actually needed in this loop. 1214 * Lock is actually needed in this loop.
1207 */ 1215 */
1208 ct_write_lock_bh(idx); 1216 ct_write_lock_bh(idx);
1209 1217
1210 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 1218 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
1211 if (!ip_vs_conn_net_eq(cp, net)) 1219 if (!ip_vs_conn_net_eq(cp, net))
1212 continue; 1220 continue;
1213 IP_VS_DBG(4, "del connection\n"); 1221 IP_VS_DBG(4, "del connection\n");
@@ -1265,8 +1273,7 @@ int __init ip_vs_conn_init(void)
1265 /* 1273 /*
1266 * Allocate the connection hash table and initialize its list heads 1274 * Allocate the connection hash table and initialize its list heads
1267 */ 1275 */
1268 ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * 1276 ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
1269 sizeof(struct list_head));
1270 if (!ip_vs_conn_tab) 1277 if (!ip_vs_conn_tab)
1271 return -ENOMEM; 1278 return -ENOMEM;
1272 1279
@@ -1286,9 +1293,8 @@ int __init ip_vs_conn_init(void)
1286 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", 1293 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
1287 sizeof(struct ip_vs_conn)); 1294 sizeof(struct ip_vs_conn));
1288 1295
1289 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { 1296 for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
1290 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]); 1297 INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
1291 }
1292 1298
1293 for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) { 1299 for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
1294 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); 1300 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4d06617fab6c..2d1f932add46 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -729,7 +729,7 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
729#endif 729#endif
730 730
731/* Handle relevant response ICMP messages - forward to the right 731/* Handle relevant response ICMP messages - forward to the right
732 * destination host. Used for NAT and local client. 732 * destination host.
733 */ 733 */
734static int handle_response_icmp(int af, struct sk_buff *skb, 734static int handle_response_icmp(int af, struct sk_buff *skb,
735 union nf_inet_addr *snet, 735 union nf_inet_addr *snet,
@@ -979,7 +979,6 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
979} 979}
980 980
981/* Handle response packets: rewrite addresses and send away... 981/* Handle response packets: rewrite addresses and send away...
982 * Used for NAT and local client.
983 */ 982 */
984static unsigned int 983static unsigned int
985handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 984handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
@@ -1280,7 +1279,6 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1280 struct ip_vs_protocol *pp; 1279 struct ip_vs_protocol *pp;
1281 struct ip_vs_proto_data *pd; 1280 struct ip_vs_proto_data *pd;
1282 unsigned int offset, ihl, verdict; 1281 unsigned int offset, ihl, verdict;
1283 union nf_inet_addr snet;
1284 1282
1285 *related = 1; 1283 *related = 1;
1286 1284
@@ -1339,17 +1337,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1339 ip_vs_fill_iphdr(AF_INET, cih, &ciph); 1337 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
1340 /* The embedded headers contain source and dest in reverse order */ 1338 /* The embedded headers contain source and dest in reverse order */
1341 cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1); 1339 cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
1342 if (!cp) { 1340 if (!cp)
1343 /* The packet could also belong to a local client */
1344 cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
1345 if (cp) {
1346 snet.ip = iph->saddr;
1347 return handle_response_icmp(AF_INET, skb, &snet,
1348 cih->protocol, cp, pp,
1349 offset, ihl);
1350 }
1351 return NF_ACCEPT; 1341 return NF_ACCEPT;
1352 }
1353 1342
1354 verdict = NF_DROP; 1343 verdict = NF_DROP;
1355 1344
@@ -1395,7 +1384,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1395 struct ip_vs_protocol *pp; 1384 struct ip_vs_protocol *pp;
1396 struct ip_vs_proto_data *pd; 1385 struct ip_vs_proto_data *pd;
1397 unsigned int offset, verdict; 1386 unsigned int offset, verdict;
1398 union nf_inet_addr snet;
1399 struct rt6_info *rt; 1387 struct rt6_info *rt;
1400 1388
1401 *related = 1; 1389 *related = 1;
@@ -1455,18 +1443,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1455 ip_vs_fill_iphdr(AF_INET6, cih, &ciph); 1443 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
1456 /* The embedded headers contain source and dest in reverse order */ 1444 /* The embedded headers contain source and dest in reverse order */
1457 cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1); 1445 cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
1458 if (!cp) { 1446 if (!cp)
1459 /* The packet could also belong to a local client */
1460 cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
1461 if (cp) {
1462 ipv6_addr_copy(&snet.in6, &iph->saddr);
1463 return handle_response_icmp(AF_INET6, skb, &snet,
1464 cih->nexthdr,
1465 cp, pp, offset,
1466 sizeof(struct ipv6hdr));
1467 }
1468 return NF_ACCEPT; 1447 return NF_ACCEPT;
1469 }
1470 1448
1471 verdict = NF_DROP; 1449 verdict = NF_DROP;
1472 1450
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c73b0c831a2d..d69ec26b6bd4 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -769,9 +769,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
769 dest->u_threshold = udest->u_threshold; 769 dest->u_threshold = udest->u_threshold;
770 dest->l_threshold = udest->l_threshold; 770 dest->l_threshold = udest->l_threshold;
771 771
772 spin_lock(&dest->dst_lock); 772 spin_lock_bh(&dest->dst_lock);
773 ip_vs_dst_reset(dest); 773 ip_vs_dst_reset(dest);
774 spin_unlock(&dest->dst_lock); 774 spin_unlock_bh(&dest->dst_lock);
775 775
776 if (add) 776 if (add)
777 ip_vs_new_estimator(svc->net, &dest->stats); 777 ip_vs_new_estimator(svc->net, &dest->stats);
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 00b5ffab3768..6bf7a807649c 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -389,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
389 int loh, doh; 389 int loh, doh;
390 390
391 /* 391 /*
392 * We think the overhead of processing active connections is fifty 392 * We use the following formula to estimate the load:
393 * times higher than that of inactive connections in average. (This
394 * fifty times might not be accurate, we will change it later.) We
395 * use the following formula to estimate the overhead:
396 * dest->activeconns*50 + dest->inactconns
397 * and the load:
398 * (dest overhead) / dest->weight 393 * (dest overhead) / dest->weight
399 * 394 *
400 * Remember -- no floats in kernel mode!!! 395 * Remember -- no floats in kernel mode!!!
@@ -410,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
410 continue; 405 continue;
411 if (atomic_read(&dest->weight) > 0) { 406 if (atomic_read(&dest->weight) > 0) {
412 least = dest; 407 least = dest;
413 loh = atomic_read(&least->activeconns) * 50 408 loh = ip_vs_dest_conn_overhead(least);
414 + atomic_read(&least->inactconns);
415 goto nextstage; 409 goto nextstage;
416 } 410 }
417 } 411 }
@@ -425,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
425 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 419 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
426 continue; 420 continue;
427 421
428 doh = atomic_read(&dest->activeconns) * 50 422 doh = ip_vs_dest_conn_overhead(dest);
429 + atomic_read(&dest->inactconns);
430 if (loh * atomic_read(&dest->weight) > 423 if (loh * atomic_read(&dest->weight) >
431 doh * atomic_read(&least->weight)) { 424 doh * atomic_read(&least->weight)) {
432 least = dest; 425 least = dest;
@@ -510,7 +503,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
510 /* No cache entry or it is invalid, time to schedule */ 503 /* No cache entry or it is invalid, time to schedule */
511 dest = __ip_vs_lblc_schedule(svc); 504 dest = __ip_vs_lblc_schedule(svc);
512 if (!dest) { 505 if (!dest) {
513 IP_VS_ERR_RL("LBLC: no destination available\n"); 506 ip_vs_scheduler_err(svc, "no destination available");
514 return NULL; 507 return NULL;
515 } 508 }
516 509
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index bfa25f1ea9e4..00631765b92a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -178,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
178 178
179 if ((atomic_read(&least->weight) > 0) 179 if ((atomic_read(&least->weight) > 0)
180 && (least->flags & IP_VS_DEST_F_AVAILABLE)) { 180 && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
181 loh = atomic_read(&least->activeconns) * 50 181 loh = ip_vs_dest_conn_overhead(least);
182 + atomic_read(&least->inactconns);
183 goto nextstage; 182 goto nextstage;
184 } 183 }
185 } 184 }
@@ -192,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
192 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 191 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
193 continue; 192 continue;
194 193
195 doh = atomic_read(&dest->activeconns) * 50 194 doh = ip_vs_dest_conn_overhead(dest);
196 + atomic_read(&dest->inactconns);
197 if ((loh * atomic_read(&dest->weight) > 195 if ((loh * atomic_read(&dest->weight) >
198 doh * atomic_read(&least->weight)) 196 doh * atomic_read(&least->weight))
199 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 197 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -228,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
228 list_for_each_entry(e, &set->list, list) { 226 list_for_each_entry(e, &set->list, list) {
229 most = e->dest; 227 most = e->dest;
230 if (atomic_read(&most->weight) > 0) { 228 if (atomic_read(&most->weight) > 0) {
231 moh = atomic_read(&most->activeconns) * 50 229 moh = ip_vs_dest_conn_overhead(most);
232 + atomic_read(&most->inactconns);
233 goto nextstage; 230 goto nextstage;
234 } 231 }
235 } 232 }
@@ -239,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
239 nextstage: 236 nextstage:
240 list_for_each_entry(e, &set->list, list) { 237 list_for_each_entry(e, &set->list, list) {
241 dest = e->dest; 238 dest = e->dest;
242 doh = atomic_read(&dest->activeconns) * 50 239 doh = ip_vs_dest_conn_overhead(dest);
243 + atomic_read(&dest->inactconns);
244 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ 240 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
245 if ((moh * atomic_read(&dest->weight) < 241 if ((moh * atomic_read(&dest->weight) <
246 doh * atomic_read(&most->weight)) 242 doh * atomic_read(&most->weight))
@@ -563,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
563 int loh, doh; 559 int loh, doh;
564 560
565 /* 561 /*
566 * We think the overhead of processing active connections is fifty 562 * We use the following formula to estimate the load:
567 * times higher than that of inactive connections in average. (This
568 * fifty times might not be accurate, we will change it later.) We
569 * use the following formula to estimate the overhead:
570 * dest->activeconns*50 + dest->inactconns
571 * and the load:
572 * (dest overhead) / dest->weight 563 * (dest overhead) / dest->weight
573 * 564 *
574 * Remember -- no floats in kernel mode!!! 565 * Remember -- no floats in kernel mode!!!
@@ -585,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
585 576
586 if (atomic_read(&dest->weight) > 0) { 577 if (atomic_read(&dest->weight) > 0) {
587 least = dest; 578 least = dest;
588 loh = atomic_read(&least->activeconns) * 50 579 loh = ip_vs_dest_conn_overhead(least);
589 + atomic_read(&least->inactconns);
590 goto nextstage; 580 goto nextstage;
591 } 581 }
592 } 582 }
@@ -600,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
600 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 590 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
601 continue; 591 continue;
602 592
603 doh = atomic_read(&dest->activeconns) * 50 593 doh = ip_vs_dest_conn_overhead(dest);
604 + atomic_read(&dest->inactconns);
605 if (loh * atomic_read(&dest->weight) > 594 if (loh * atomic_read(&dest->weight) >
606 doh * atomic_read(&least->weight)) { 595 doh * atomic_read(&least->weight)) {
607 least = dest; 596 least = dest;
@@ -692,7 +681,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
692 /* The cache entry is invalid, time to schedule */ 681 /* The cache entry is invalid, time to schedule */
693 dest = __ip_vs_lblcr_schedule(svc); 682 dest = __ip_vs_lblcr_schedule(svc);
694 if (!dest) { 683 if (!dest) {
695 IP_VS_ERR_RL("LBLCR: no destination available\n"); 684 ip_vs_scheduler_err(svc, "no destination available");
696 read_unlock(&svc->sched_lock); 685 read_unlock(&svc->sched_lock);
697 return NULL; 686 return NULL;
698 } 687 }
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 4f69db1fac56..f391819c0cca 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -22,22 +22,6 @@
22 22
23#include <net/ip_vs.h> 23#include <net/ip_vs.h>
24 24
25
26static inline unsigned int
27ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
28{
29 /*
30 * We think the overhead of processing active connections is 256
31 * times higher than that of inactive connections in average. (This
32 * 256 times might not be accurate, we will change it later) We
33 * use the following formula to estimate the overhead now:
34 * dest->activeconns*256 + dest->inactconns
35 */
36 return (atomic_read(&dest->activeconns) << 8) +
37 atomic_read(&dest->inactconns);
38}
39
40
41/* 25/*
42 * Least Connection scheduling 26 * Least Connection scheduling
43 */ 27 */
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
62 if ((dest->flags & IP_VS_DEST_F_OVERLOAD) || 46 if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
63 atomic_read(&dest->weight) == 0) 47 atomic_read(&dest->weight) == 0)
64 continue; 48 continue;
65 doh = ip_vs_lc_dest_overhead(dest); 49 doh = ip_vs_dest_conn_overhead(dest);
66 if (!least || doh < loh) { 50 if (!least || doh < loh) {
67 least = dest; 51 least = dest;
68 loh = doh; 52 loh = doh;
@@ -70,7 +54,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
70 } 54 }
71 55
72 if (!least) 56 if (!least)
73 IP_VS_ERR_RL("LC: no destination available\n"); 57 ip_vs_scheduler_err(svc, "no destination available");
74 else 58 else
75 IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d " 59 IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
76 "inactconns %d\n", 60 "inactconns %d\n",
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index c413e1830823..984d9c137d84 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -99,7 +99,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
99 } 99 }
100 100
101 if (!least) { 101 if (!least) {
102 IP_VS_ERR_RL("NQ: no destination available\n"); 102 ip_vs_scheduler_err(svc, "no destination available");
103 return NULL; 103 return NULL;
104 } 104 }
105 105
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index e210f37d8ea2..c49b388d1085 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -72,7 +72,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
72 q = q->next; 72 q = q->next;
73 } while (q != p); 73 } while (q != p);
74 write_unlock(&svc->sched_lock); 74 write_unlock(&svc->sched_lock);
75 IP_VS_ERR_RL("RR: no destination available\n"); 75 ip_vs_scheduler_err(svc, "no destination available");
76 return NULL; 76 return NULL;
77 77
78 out: 78 out:
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 076ebe00435d..08dbdd5bc18f 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -29,6 +29,7 @@
29 29
30#include <net/ip_vs.h> 30#include <net/ip_vs.h>
31 31
32EXPORT_SYMBOL(ip_vs_scheduler_err);
32/* 33/*
33 * IPVS scheduler list 34 * IPVS scheduler list
34 */ 35 */
@@ -146,6 +147,30 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
146 module_put(scheduler->module); 147 module_put(scheduler->module);
147} 148}
148 149
150/*
151 * Common error output helper for schedulers
152 */
153
154void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
155{
156 if (svc->fwmark) {
157 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
158 svc->scheduler->name, svc->fwmark,
159 svc->fwmark, msg);
160#ifdef CONFIG_IP_VS_IPV6
161 } else if (svc->af == AF_INET6) {
162 IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
163 svc->scheduler->name,
164 ip_vs_proto_name(svc->protocol),
165 &svc->addr.in6, ntohs(svc->port), msg);
166#endif
167 } else {
168 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
169 svc->scheduler->name,
170 ip_vs_proto_name(svc->protocol),
171 &svc->addr.ip, ntohs(svc->port), msg);
172 }
173}
149 174
150/* 175/*
151 * Register a scheduler in the scheduler list 176 * Register a scheduler in the scheduler list
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 1ab75a9dc400..89ead246ed3d 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -87,7 +87,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
87 goto nextstage; 87 goto nextstage;
88 } 88 }
89 } 89 }
90 IP_VS_ERR_RL("SED: no destination available\n"); 90 ip_vs_scheduler_err(svc, "no destination available");
91 return NULL; 91 return NULL;
92 92
93 /* 93 /*
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index e6cc174fbc06..b5e2556c581a 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -223,7 +223,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
223 || !(dest->flags & IP_VS_DEST_F_AVAILABLE) 223 || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
224 || atomic_read(&dest->weight) <= 0 224 || atomic_read(&dest->weight) <= 0
225 || is_overloaded(dest)) { 225 || is_overloaded(dest)) {
226 IP_VS_ERR_RL("SH: no destination available\n"); 226 ip_vs_scheduler_err(svc, "no destination available");
227 return NULL; 227 return NULL;
228 } 228 }
229 229
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index d1b7298e5894..fecf24de4af3 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -374,8 +374,8 @@ get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
374 struct ip_vs_sync_buff *sb; 374 struct ip_vs_sync_buff *sb;
375 375
376 spin_lock_bh(&ipvs->sync_buff_lock); 376 spin_lock_bh(&ipvs->sync_buff_lock);
377 if (ipvs->sync_buff && (time == 0 || 377 if (ipvs->sync_buff &&
378 time_before(jiffies - ipvs->sync_buff->firstuse, time))) { 378 time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
379 sb = ipvs->sync_buff; 379 sb = ipvs->sync_buff;
380 ipvs->sync_buff = NULL; 380 ipvs->sync_buff = NULL;
381 } else 381 } else
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index bbddfdb10db2..bc1bfc48a17f 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -27,22 +27,6 @@
27 27
28#include <net/ip_vs.h> 28#include <net/ip_vs.h>
29 29
30
31static inline unsigned int
32ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
33{
34 /*
35 * We think the overhead of processing active connections is 256
36 * times higher than that of inactive connections in average. (This
37 * 256 times might not be accurate, we will change it later) We
38 * use the following formula to estimate the overhead now:
39 * dest->activeconns*256 + dest->inactconns
40 */
41 return (atomic_read(&dest->activeconns) << 8) +
42 atomic_read(&dest->inactconns);
43}
44
45
46/* 30/*
47 * Weighted Least Connection scheduling 31 * Weighted Least Connection scheduling
48 */ 32 */
@@ -71,11 +55,11 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
71 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && 55 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
72 atomic_read(&dest->weight) > 0) { 56 atomic_read(&dest->weight) > 0) {
73 least = dest; 57 least = dest;
74 loh = ip_vs_wlc_dest_overhead(least); 58 loh = ip_vs_dest_conn_overhead(least);
75 goto nextstage; 59 goto nextstage;
76 } 60 }
77 } 61 }
78 IP_VS_ERR_RL("WLC: no destination available\n"); 62 ip_vs_scheduler_err(svc, "no destination available");
79 return NULL; 63 return NULL;
80 64
81 /* 65 /*
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
85 list_for_each_entry_continue(dest, &svc->destinations, n_list) { 69 list_for_each_entry_continue(dest, &svc->destinations, n_list) {
86 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 70 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
87 continue; 71 continue;
88 doh = ip_vs_wlc_dest_overhead(dest); 72 doh = ip_vs_dest_conn_overhead(dest);
89 if (loh * atomic_read(&dest->weight) > 73 if (loh * atomic_read(&dest->weight) >
90 doh * atomic_read(&least->weight)) { 74 doh * atomic_read(&least->weight)) {
91 least = dest; 75 least = dest;
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 30db633f88f1..1ef41f50723c 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -147,8 +147,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
147 147
148 if (mark->cl == mark->cl->next) { 148 if (mark->cl == mark->cl->next) {
149 /* no dest entry */ 149 /* no dest entry */
150 IP_VS_ERR_RL("WRR: no destination available: " 150 ip_vs_scheduler_err(svc,
151 "no destinations present\n"); 151 "no destination available: "
152 "no destinations present");
152 dest = NULL; 153 dest = NULL;
153 goto out; 154 goto out;
154 } 155 }
@@ -162,8 +163,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
162 */ 163 */
163 if (mark->cw == 0) { 164 if (mark->cw == 0) {
164 mark->cl = &svc->destinations; 165 mark->cl = &svc->destinations;
165 IP_VS_ERR_RL("WRR: no destination " 166 ip_vs_scheduler_err(svc,
166 "available\n"); 167 "no destination available");
167 dest = NULL; 168 dest = NULL;
168 goto out; 169 goto out;
169 } 170 }
@@ -185,8 +186,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
185 /* back to the start, and no dest is found. 186 /* back to the start, and no dest is found.
186 It is only possible when all dests are OVERLOADED */ 187 It is only possible when all dests are OVERLOADED */
187 dest = NULL; 188 dest = NULL;
188 IP_VS_ERR_RL("WRR: no destination available: " 189 ip_vs_scheduler_err(svc,
189 "all destinations are overloaded\n"); 190 "no destination available: "
191 "all destinations are overloaded");
190 goto out; 192 goto out;
191 } 193 }
192 } 194 }
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 1f2a4e35fb11..878f6dd9dbad 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -43,6 +43,13 @@
43 43
44#include <net/ip_vs.h> 44#include <net/ip_vs.h>
45 45
46enum {
47 IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
48 IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
49 IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
50 * local
51 */
52};
46 53
47/* 54/*
48 * Destination cache to speed up outgoing route lookup 55 * Destination cache to speed up outgoing route lookup
@@ -77,11 +84,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
77 return dst; 84 return dst;
78} 85}
79 86
80/* 87/* Get route to destination or remote server */
81 * Get route to destination or remote server
82 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
83 * &4=Allow redirect from remote daddr to local
84 */
85static struct rtable * 88static struct rtable *
86__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, 89__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
87 __be32 daddr, u32 rtos, int rt_mode) 90 __be32 daddr, u32 rtos, int rt_mode)
@@ -100,7 +103,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
100 .fl4_tos = rtos, 103 .fl4_tos = rtos,
101 }; 104 };
102 105
103 if (ip_route_output_key(net, &rt, &fl)) { 106 rt = ip_route_output_key(net, &fl);
107 if (IS_ERR(rt)) {
104 spin_unlock(&dest->dst_lock); 108 spin_unlock(&dest->dst_lock);
105 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", 109 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
106 &dest->addr.ip); 110 &dest->addr.ip);
@@ -118,7 +122,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
118 .fl4_tos = rtos, 122 .fl4_tos = rtos,
119 }; 123 };
120 124
121 if (ip_route_output_key(net, &rt, &fl)) { 125 rt = ip_route_output_key(net, &fl);
126 if (IS_ERR(rt)) {
122 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", 127 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
123 &daddr); 128 &daddr);
124 return NULL; 129 return NULL;
@@ -126,15 +131,16 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
126 } 131 }
127 132
128 local = rt->rt_flags & RTCF_LOCAL; 133 local = rt->rt_flags & RTCF_LOCAL;
129 if (!((local ? 1 : 2) & rt_mode)) { 134 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
135 rt_mode)) {
130 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n", 136 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
131 (rt->rt_flags & RTCF_LOCAL) ? 137 (rt->rt_flags & RTCF_LOCAL) ?
132 "local":"non-local", &rt->rt_dst); 138 "local":"non-local", &rt->rt_dst);
133 ip_rt_put(rt); 139 ip_rt_put(rt);
134 return NULL; 140 return NULL;
135 } 141 }
136 if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) && 142 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
137 ort->rt_flags & RTCF_LOCAL)) { 143 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
138 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local " 144 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
139 "requires NAT method, dest: %pI4\n", 145 "requires NAT method, dest: %pI4\n",
140 &ip_hdr(skb)->daddr, &rt->rt_dst); 146 &ip_hdr(skb)->daddr, &rt->rt_dst);
@@ -176,7 +182,8 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
176 .mark = skb->mark, 182 .mark = skb->mark,
177 }; 183 };
178 184
179 if (ip_route_output_key(net, &rt, &fl)) 185 rt = ip_route_output_key(net, &fl);
186 if (IS_ERR(rt))
180 return 0; 187 return 0;
181 if (!(rt->rt_flags & RTCF_LOCAL)) { 188 if (!(rt->rt_flags & RTCF_LOCAL)) {
182 ip_rt_put(rt); 189 ip_rt_put(rt);
@@ -214,8 +221,13 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
214 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev, 221 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
215 &fl.fl6_dst, 0, &fl.fl6_src) < 0) 222 &fl.fl6_dst, 0, &fl.fl6_src) < 0)
216 goto out_err; 223 goto out_err;
217 if (do_xfrm && xfrm_lookup(net, &dst, &fl, NULL, 0) < 0) 224 if (do_xfrm) {
218 goto out_err; 225 dst = xfrm_lookup(net, dst, &fl, NULL, 0);
226 if (IS_ERR(dst)) {
227 dst = NULL;
228 goto out_err;
229 }
230 }
219 ipv6_addr_copy(ret_saddr, &fl.fl6_src); 231 ipv6_addr_copy(ret_saddr, &fl.fl6_src);
220 return dst; 232 return dst;
221 233
@@ -383,8 +395,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
383 395
384 EnterFunction(10); 396 EnterFunction(10);
385 397
386 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, 398 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
387 RT_TOS(iph->tos), 2))) 399 IP_VS_RT_MODE_NON_LOCAL)))
388 goto tx_error_icmp; 400 goto tx_error_icmp;
389 401
390 /* MTU checking */ 402 /* MTU checking */
@@ -512,7 +524,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
512 } 524 }
513 525
514 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 526 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
515 RT_TOS(iph->tos), 1|2|4))) 527 RT_TOS(iph->tos),
528 IP_VS_RT_MODE_LOCAL |
529 IP_VS_RT_MODE_NON_LOCAL |
530 IP_VS_RT_MODE_RDR)))
516 goto tx_error_icmp; 531 goto tx_error_icmp;
517 local = rt->rt_flags & RTCF_LOCAL; 532 local = rt->rt_flags & RTCF_LOCAL;
518 /* 533 /*
@@ -755,7 +770,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
755 EnterFunction(10); 770 EnterFunction(10);
756 771
757 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 772 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
758 RT_TOS(tos), 1|2))) 773 RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
774 IP_VS_RT_MODE_NON_LOCAL)))
759 goto tx_error_icmp; 775 goto tx_error_icmp;
760 if (rt->rt_flags & RTCF_LOCAL) { 776 if (rt->rt_flags & RTCF_LOCAL) {
761 ip_rt_put(rt); 777 ip_rt_put(rt);
@@ -984,7 +1000,9 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
984 EnterFunction(10); 1000 EnterFunction(10);
985 1001
986 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1002 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
987 RT_TOS(iph->tos), 1|2))) 1003 RT_TOS(iph->tos),
1004 IP_VS_RT_MODE_LOCAL |
1005 IP_VS_RT_MODE_NON_LOCAL)))
988 goto tx_error_icmp; 1006 goto tx_error_icmp;
989 if (rt->rt_flags & RTCF_LOCAL) { 1007 if (rt->rt_flags & RTCF_LOCAL) {
990 ip_rt_put(rt); 1008 ip_rt_put(rt);
@@ -1128,7 +1146,10 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1128 */ 1146 */
1129 1147
1130 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1148 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1131 RT_TOS(ip_hdr(skb)->tos), 1|2|4))) 1149 RT_TOS(ip_hdr(skb)->tos),
1150 IP_VS_RT_MODE_LOCAL |
1151 IP_VS_RT_MODE_NON_LOCAL |
1152 IP_VS_RT_MODE_RDR)))
1132 goto tx_error_icmp; 1153 goto tx_error_icmp;
1133 local = rt->rt_flags & RTCF_LOCAL; 1154 local = rt->rt_flags & RTCF_LOCAL;
1134 1155
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 6f38d0e2ea4a..37bf94394be0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -227,11 +227,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
227 * sCL -> sIV 227 * sCL -> sIV
228 */ 228 */
229/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ 229/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
230/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sSR }, 230/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
231/* 231/*
232 * sSS -> sSR Standard open. 232 * sSS -> sSR Standard open.
233 * sS2 -> sSR Simultaneous open 233 * sS2 -> sSR Simultaneous open
234 * sSR -> sSR Retransmitted SYN/ACK. 234 * sSR -> sIG Retransmitted SYN/ACK, ignore it.
235 * sES -> sIG Late retransmitted SYN/ACK? 235 * sES -> sIG Late retransmitted SYN/ACK?
236 * sFW -> sIG Might be SYN/ACK answering ignored SYN 236 * sFW -> sIG Might be SYN/ACK answering ignored SYN
237 * sCW -> sIG 237 * sCW -> sIG
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 20c775cff2a8..20714edf6cd2 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
85 85
86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
87{ 87{
88 if (pf >= ARRAY_SIZE(nf_loggers))
89 return -EINVAL;
88 mutex_lock(&nf_log_mutex); 90 mutex_lock(&nf_log_mutex);
89 if (__find_logger(pf, logger->name) == NULL) { 91 if (__find_logger(pf, logger->name) == NULL) {
90 mutex_unlock(&nf_log_mutex); 92 mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
98 100
99void nf_log_unbind_pf(u_int8_t pf) 101void nf_log_unbind_pf(u_int8_t pf)
100{ 102{
103 if (pf >= ARRAY_SIZE(nf_loggers))
104 return;
101 mutex_lock(&nf_log_mutex); 105 mutex_lock(&nf_log_mutex);
102 rcu_assign_pointer(nf_loggers[pf], NULL); 106 rcu_assign_pointer(nf_loggers[pf], NULL);
103 mutex_unlock(&nf_log_mutex); 107 mutex_unlock(&nf_log_mutex);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 91592da504b9..985e9b76c916 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -376,7 +376,6 @@ __build_packet_message(struct nfulnl_instance *inst,
376 unsigned int hooknum, 376 unsigned int hooknum,
377 const struct net_device *indev, 377 const struct net_device *indev,
378 const struct net_device *outdev, 378 const struct net_device *outdev,
379 const struct nf_loginfo *li,
380 const char *prefix, unsigned int plen) 379 const char *prefix, unsigned int plen)
381{ 380{
382 struct nfulnl_msg_packet_hdr pmsg; 381 struct nfulnl_msg_packet_hdr pmsg;
@@ -652,7 +651,7 @@ nfulnl_log_packet(u_int8_t pf,
652 inst->qlen++; 651 inst->qlen++;
653 652
654 __build_packet_message(inst, skb, data_len, pf, 653 __build_packet_message(inst, skb, data_len, pf,
655 hooknum, in, out, li, prefix, plen); 654 hooknum, in, out, prefix, plen);
656 655
657 if (inst->qlen >= qthreshold) 656 if (inst->qlen >= qthreshold)
658 __nfulnl_flush(inst); 657 __nfulnl_flush(inst);
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 5128a6c4cb2c..624725b5286f 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -73,7 +73,8 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
73 fl.fl4_dst = info->gw.ip; 73 fl.fl4_dst = info->gw.ip;
74 fl.fl4_tos = RT_TOS(iph->tos); 74 fl.fl4_tos = RT_TOS(iph->tos);
75 fl.fl4_scope = RT_SCOPE_UNIVERSE; 75 fl.fl4_scope = RT_SCOPE_UNIVERSE;
76 if (ip_route_output_key(net, &rt, &fl) != 0) 76 rt = ip_route_output_key(net, &fl);
77 if (IS_ERR(rt))
77 return false; 78 return false;
78 79
79 skb_dst_drop(skb); 80 skb_dst_drop(skb);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 4ef1b63ad73f..2c0086a4751e 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -272,6 +272,11 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
272{ 272{
273 int ret; 273 int ret;
274 274
275 if (strcmp(par->table, "raw") == 0) {
276 pr_info("state is undetermined at the time of raw table\n");
277 return -EINVAL;
278 }
279
275 ret = nf_ct_l3proto_try_module_get(par->family); 280 ret = nf_ct_l3proto_try_module_get(par->family);
276 if (ret < 0) 281 if (ret < 0)
277 pr_info("cannot load conntrack support for proto=%u\n", 282 pr_info("cannot load conntrack support for proto=%u\n",
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index 6caef8b20611..f4fc4c9ad567 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -49,9 +49,9 @@
49static inline void netlbl_netlink_auditinfo(struct sk_buff *skb, 49static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
50 struct netlbl_audit *audit_info) 50 struct netlbl_audit *audit_info)
51{ 51{
52 audit_info->secid = NETLINK_CB(skb).sid; 52 security_task_getsecid(current, &audit_info->secid);
53 audit_info->loginuid = NETLINK_CB(skb).loginuid; 53 audit_info->loginuid = audit_get_loginuid(current);
54 audit_info->sessionid = NETLINK_CB(skb).sessionid; 54 audit_info->sessionid = audit_get_sessionid(current);
55} 55}
56 56
57/* NetLabel NETLINK I/O functions */ 57/* NetLabel NETLINK I/O functions */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d53c55..c8f35b5d2ee9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1362,17 +1362,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1362 1362
1363 NETLINK_CB(skb).pid = nlk->pid; 1363 NETLINK_CB(skb).pid = nlk->pid;
1364 NETLINK_CB(skb).dst_group = dst_group; 1364 NETLINK_CB(skb).dst_group = dst_group;
1365 NETLINK_CB(skb).loginuid = audit_get_loginuid(current);
1366 NETLINK_CB(skb).sessionid = audit_get_sessionid(current);
1367 security_task_getsecid(current, &(NETLINK_CB(skb).sid));
1368 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1365 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1369 1366
1370 /* What can I do? Netlink is asynchronous, so that
1371 we will have to save current capabilities to
1372 check them, when this message will be delivered
1373 to corresponding kernel module. --ANK (980802)
1374 */
1375
1376 err = -EFAULT; 1367 err = -EFAULT;
1377 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1368 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1378 kfree_skb(skb); 1369 kfree_skb(skb);
@@ -1407,7 +1398,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1407 int noblock = flags&MSG_DONTWAIT; 1398 int noblock = flags&MSG_DONTWAIT;
1408 size_t copied; 1399 size_t copied;
1409 struct sk_buff *skb, *data_skb; 1400 struct sk_buff *skb, *data_skb;
1410 int err; 1401 int err, ret;
1411 1402
1412 if (flags&MSG_OOB) 1403 if (flags&MSG_OOB)
1413 return -EOPNOTSUPP; 1404 return -EOPNOTSUPP;
@@ -1470,8 +1461,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1470 1461
1471 skb_free_datagram(sk, skb); 1462 skb_free_datagram(sk, skb);
1472 1463
1473 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1464 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1474 netlink_dump(sk); 1465 ret = netlink_dump(sk);
1466 if (ret) {
1467 sk->sk_err = ret;
1468 sk->sk_error_report(sk);
1469 }
1470 }
1475 1471
1476 scm_recv(sock, msg, siocb->scm, flags); 1472 scm_recv(sock, msg, siocb->scm, flags);
1477out: 1473out:
@@ -1736,6 +1732,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1736 struct netlink_callback *cb; 1732 struct netlink_callback *cb;
1737 struct sock *sk; 1733 struct sock *sk;
1738 struct netlink_sock *nlk; 1734 struct netlink_sock *nlk;
1735 int ret;
1739 1736
1740 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1737 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1741 if (cb == NULL) 1738 if (cb == NULL)
@@ -1764,9 +1761,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1764 nlk->cb = cb; 1761 nlk->cb = cb;
1765 mutex_unlock(nlk->cb_mutex); 1762 mutex_unlock(nlk->cb_mutex);
1766 1763
1767 netlink_dump(sk); 1764 ret = netlink_dump(sk);
1765
1768 sock_put(sk); 1766 sock_put(sk);
1769 1767
1768 if (ret)
1769 return ret;
1770
1770 /* We successfully started a dump, by returning -EINTR we 1771 /* We successfully started a dump, by returning -EINTR we
1771 * signal not to send ACK even if it was requested. 1772 * signal not to send ACK even if it was requested.
1772 */ 1773 */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5efef5b5879e..b5362e96022b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -966,7 +966,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
966 966
967static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 967static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
968{ 968{
969 struct socket *sock;
970 struct sk_buff *skb; 969 struct sk_buff *skb;
971 struct net_device *dev; 970 struct net_device *dev;
972 __be16 proto; 971 __be16 proto;
@@ -978,8 +977,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
978 int len_sum = 0; 977 int len_sum = 0;
979 int status = 0; 978 int status = 0;
980 979
981 sock = po->sk.sk_socket;
982
983 mutex_lock(&po->pg_vec_lock); 980 mutex_lock(&po->pg_vec_lock);
984 981
985 err = -EBUSY; 982 err = -EBUSY;
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 89315009bab1..1a2b0633fece 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
423 goto protocol_error; 423 goto protocol_error;
424 } 424 }
425 425
426 case RXRPC_PACKET_TYPE_ACKALL:
426 case RXRPC_PACKET_TYPE_ACK: 427 case RXRPC_PACKET_TYPE_ACK:
427 /* ACK processing is done in process context */ 428 /* ACK processing is done in process context */
428 read_lock_bh(&call->state_lock); 429 read_lock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0353fe..d763793d39de 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
89 return ret; 89 return ret;
90 90
91 plen -= sizeof(*token); 91 plen -= sizeof(*token);
92 token = kmalloc(sizeof(*token), GFP_KERNEL); 92 token = kzalloc(sizeof(*token), GFP_KERNEL);
93 if (!token) 93 if (!token)
94 return -ENOMEM; 94 return -ENOMEM;
95 95
96 token->kad = kmalloc(plen, GFP_KERNEL); 96 token->kad = kzalloc(plen, GFP_KERNEL);
97 if (!token->kad) { 97 if (!token->kad) {
98 kfree(token); 98 kfree(token);
99 return -ENOMEM; 99 return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
731 goto error; 731 goto error;
732 732
733 ret = -ENOMEM; 733 ret = -ENOMEM;
734 token = kmalloc(sizeof(*token), GFP_KERNEL); 734 token = kzalloc(sizeof(*token), GFP_KERNEL);
735 if (!token) 735 if (!token)
736 goto error; 736 goto error;
737 token->kad = kmalloc(plen, GFP_KERNEL); 737 token->kad = kzalloc(plen, GFP_KERNEL);
738 if (!token->kad) 738 if (!token->kad)
739 goto error_free; 739 goto error_free;
740 740
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index a53fb25a64ed..3620c569275f 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -37,7 +37,6 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
37{ 37{
38 struct rtable *rt; 38 struct rtable *rt;
39 struct flowi fl; 39 struct flowi fl;
40 int ret;
41 40
42 peer->if_mtu = 1500; 41 peer->if_mtu = 1500;
43 42
@@ -58,9 +57,9 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
58 BUG(); 57 BUG();
59 } 58 }
60 59
61 ret = ip_route_output_key(&init_net, &rt, &fl); 60 rt = ip_route_output_key(&init_net, &fl);
62 if (ret < 0) { 61 if (IS_ERR(rt)) {
63 _leave(" [route err %d]", ret); 62 _leave(" [route err %ld]", PTR_ERR(rt));
64 return; 63 return;
65 } 64 }
66 65
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index d580cdfca093..a907905376df 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -143,7 +143,7 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
143 if (head == NULL) 143 if (head == NULL)
144 goto old_method; 144 goto old_method;
145 145
146 iif = ((struct rtable *)dst)->fl.iif; 146 iif = ((struct rtable *)dst)->rt_iif;
147 147
148 h = route4_fastmap_hash(id, iif); 148 h = route4_fastmap_hash(id, iif);
149 if (id == head->fastmap[h].id && 149 if (id == head->fastmap[h].id &&
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index e5e174782677..a4de67eca824 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -264,7 +264,7 @@ META_COLLECTOR(int_rtiif)
264 if (unlikely(skb_rtable(skb) == NULL)) 264 if (unlikely(skb_rtable(skb) == NULL))
265 *err = -1; 265 *err = -1;
266 else 266 else
267 dst->value = skb_rtable(skb)->fl.iif; 267 dst->value = skb_rtable(skb)->rt_iif;
268} 268}
269 269
270/************************************************************************** 270/**************************************************************************
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index be33f9ddf9dd..66effe2da8e0 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,15 +19,9 @@
19 19
20/* 1 band FIFO pseudo-"scheduler" */ 20/* 1 band FIFO pseudo-"scheduler" */
21 21
22struct fifo_sched_data {
23 u32 limit;
24};
25
26static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) 22static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
27{ 23{
28 struct fifo_sched_data *q = qdisc_priv(sch); 24 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
29
30 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
31 return qdisc_enqueue_tail(skb, sch); 25 return qdisc_enqueue_tail(skb, sch);
32 26
33 return qdisc_reshape_fail(skb, sch); 27 return qdisc_reshape_fail(skb, sch);
@@ -35,9 +29,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
35 29
36static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) 30static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
37{ 31{
38 struct fifo_sched_data *q = qdisc_priv(sch); 32 if (likely(skb_queue_len(&sch->q) < sch->limit))
39
40 if (likely(skb_queue_len(&sch->q) < q->limit))
41 return qdisc_enqueue_tail(skb, sch); 33 return qdisc_enqueue_tail(skb, sch);
42 34
43 return qdisc_reshape_fail(skb, sch); 35 return qdisc_reshape_fail(skb, sch);
@@ -45,9 +37,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
45 37
46static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) 38static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
47{ 39{
48 struct fifo_sched_data *q = qdisc_priv(sch); 40 if (likely(skb_queue_len(&sch->q) < sch->limit))
49
50 if (likely(skb_queue_len(&sch->q) < q->limit))
51 return qdisc_enqueue_tail(skb, sch); 41 return qdisc_enqueue_tail(skb, sch);
52 42
53 /* queue full, remove one skb to fulfill the limit */ 43 /* queue full, remove one skb to fulfill the limit */
@@ -60,7 +50,6 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
60 50
61static int fifo_init(struct Qdisc *sch, struct nlattr *opt) 51static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
62{ 52{
63 struct fifo_sched_data *q = qdisc_priv(sch);
64 bool bypass; 53 bool bypass;
65 bool is_bfifo = sch->ops == &bfifo_qdisc_ops; 54 bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
66 55
@@ -70,20 +59,20 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
70 if (is_bfifo) 59 if (is_bfifo)
71 limit *= psched_mtu(qdisc_dev(sch)); 60 limit *= psched_mtu(qdisc_dev(sch));
72 61
73 q->limit = limit; 62 sch->limit = limit;
74 } else { 63 } else {
75 struct tc_fifo_qopt *ctl = nla_data(opt); 64 struct tc_fifo_qopt *ctl = nla_data(opt);
76 65
77 if (nla_len(opt) < sizeof(*ctl)) 66 if (nla_len(opt) < sizeof(*ctl))
78 return -EINVAL; 67 return -EINVAL;
79 68
80 q->limit = ctl->limit; 69 sch->limit = ctl->limit;
81 } 70 }
82 71
83 if (is_bfifo) 72 if (is_bfifo)
84 bypass = q->limit >= psched_mtu(qdisc_dev(sch)); 73 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
85 else 74 else
86 bypass = q->limit >= 1; 75 bypass = sch->limit >= 1;
87 76
88 if (bypass) 77 if (bypass)
89 sch->flags |= TCQ_F_CAN_BYPASS; 78 sch->flags |= TCQ_F_CAN_BYPASS;
@@ -94,8 +83,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
94 83
95static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb) 84static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
96{ 85{
97 struct fifo_sched_data *q = qdisc_priv(sch); 86 struct tc_fifo_qopt opt = { .limit = sch->limit };
98 struct tc_fifo_qopt opt = { .limit = q->limit };
99 87
100 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 88 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
101 return skb->len; 89 return skb->len;
@@ -106,7 +94,7 @@ nla_put_failure:
106 94
107struct Qdisc_ops pfifo_qdisc_ops __read_mostly = { 95struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
108 .id = "pfifo", 96 .id = "pfifo",
109 .priv_size = sizeof(struct fifo_sched_data), 97 .priv_size = 0,
110 .enqueue = pfifo_enqueue, 98 .enqueue = pfifo_enqueue,
111 .dequeue = qdisc_dequeue_head, 99 .dequeue = qdisc_dequeue_head,
112 .peek = qdisc_peek_head, 100 .peek = qdisc_peek_head,
@@ -121,7 +109,7 @@ EXPORT_SYMBOL(pfifo_qdisc_ops);
121 109
122struct Qdisc_ops bfifo_qdisc_ops __read_mostly = { 110struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
123 .id = "bfifo", 111 .id = "bfifo",
124 .priv_size = sizeof(struct fifo_sched_data), 112 .priv_size = 0,
125 .enqueue = bfifo_enqueue, 113 .enqueue = bfifo_enqueue,
126 .dequeue = qdisc_dequeue_head, 114 .dequeue = qdisc_dequeue_head,
127 .peek = qdisc_peek_head, 115 .peek = qdisc_peek_head,
@@ -136,7 +124,7 @@ EXPORT_SYMBOL(bfifo_qdisc_ops);
136 124
137struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = { 125struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
138 .id = "pfifo_head_drop", 126 .id = "pfifo_head_drop",
139 .priv_size = sizeof(struct fifo_sched_data), 127 .priv_size = 0,
140 .enqueue = pfifo_tail_enqueue, 128 .enqueue = pfifo_tail_enqueue,
141 .dequeue = qdisc_dequeue_head, 129 .dequeue = qdisc_dequeue_head,
142 .peek = qdisc_peek_head, 130 .peek = qdisc_peek_head,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0da09d508737..c84b65920d1b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -550,21 +550,25 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
550{ 550{
551 void *p; 551 void *p;
552 struct Qdisc *sch; 552 struct Qdisc *sch;
553 unsigned int size; 553 unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
554 int err = -ENOBUFS; 554 int err = -ENOBUFS;
555 555
556 /* ensure that the Qdisc and the private data are 64-byte aligned */
557 size = QDISC_ALIGN(sizeof(*sch));
558 size += ops->priv_size + (QDISC_ALIGNTO - 1);
559
560 p = kzalloc_node(size, GFP_KERNEL, 556 p = kzalloc_node(size, GFP_KERNEL,
561 netdev_queue_numa_node_read(dev_queue)); 557 netdev_queue_numa_node_read(dev_queue));
562 558
563 if (!p) 559 if (!p)
564 goto errout; 560 goto errout;
565 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 561 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
566 sch->padded = (char *) sch - (char *) p; 562 /* if we got non aligned memory, ask more and do alignment ourself */
567 563 if (sch != p) {
564 kfree(p);
565 p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
566 netdev_queue_numa_node_read(dev_queue));
567 if (!p)
568 goto errout;
569 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
570 sch->padded = (char *) sch - (char *) p;
571 }
568 INIT_LIST_HEAD(&sch->list); 572 INIT_LIST_HEAD(&sch->list);
569 skb_queue_head_init(&sch->q); 573 skb_queue_head_init(&sch->q);
570 spin_lock_init(&sch->busylock); 574 spin_lock_init(&sch->busylock);
@@ -840,6 +844,7 @@ void dev_deactivate(struct net_device *dev)
840 844
841 list_add(&dev->unreg_list, &single); 845 list_add(&dev->unreg_list, &single);
842 dev_deactivate_many(&single); 846 dev_deactivate_many(&single);
847 list_del(&single);
843} 848}
844EXPORT_SYMBOL(dev_deactivate); 849EXPORT_SYMBOL(dev_deactivate);
845 850
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5f1fb8bd862d..6b04287913cd 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1089,7 +1089,6 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1089 base.inqueue.immediate); 1089 base.inqueue.immediate);
1090 struct sctp_endpoint *ep; 1090 struct sctp_endpoint *ep;
1091 struct sctp_chunk *chunk; 1091 struct sctp_chunk *chunk;
1092 struct sock *sk;
1093 struct sctp_inq *inqueue; 1092 struct sctp_inq *inqueue;
1094 int state; 1093 int state;
1095 sctp_subtype_t subtype; 1094 sctp_subtype_t subtype;
@@ -1097,7 +1096,6 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1097 1096
1098 /* The association should be held so we should be safe. */ 1097 /* The association should be held so we should be safe. */
1099 ep = asoc->ep; 1098 ep = asoc->ep;
1100 sk = asoc->base.sk;
1101 1099
1102 inqueue = &asoc->base.inqueue; 1100 inqueue = &asoc->base.inqueue;
1103 sctp_association_hold(asoc); 1101 sctp_association_hold(asoc);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index ea2192444ce6..826661be73e7 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -948,14 +948,11 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
948 union sctp_addr addr; 948 union sctp_addr addr;
949 union sctp_addr *paddr = &addr; 949 union sctp_addr *paddr = &addr;
950 struct sctphdr *sh = sctp_hdr(skb); 950 struct sctphdr *sh = sctp_hdr(skb);
951 sctp_chunkhdr_t *ch;
952 union sctp_params params; 951 union sctp_params params;
953 sctp_init_chunk_t *init; 952 sctp_init_chunk_t *init;
954 struct sctp_transport *transport; 953 struct sctp_transport *transport;
955 struct sctp_af *af; 954 struct sctp_af *af;
956 955
957 ch = (sctp_chunkhdr_t *) skb->data;
958
959 /* 956 /*
960 * This code will NOT touch anything inside the chunk--it is 957 * This code will NOT touch anything inside the chunk--it is
961 * strictly READ-ONLY. 958 * strictly READ-ONLY.
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 8c6d379b4bb6..26dc005113a0 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -545,13 +545,11 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
545 struct sctp_transport *transport = pkt->transport; 545 struct sctp_transport *transport = pkt->transport;
546 sctp_xmit_t status; 546 sctp_xmit_t status;
547 struct sctp_chunk *chunk, *chunk1; 547 struct sctp_chunk *chunk, *chunk1;
548 struct sctp_association *asoc;
549 int fast_rtx; 548 int fast_rtx;
550 int error = 0; 549 int error = 0;
551 int timer = 0; 550 int timer = 0;
552 int done = 0; 551 int done = 0;
553 552
554 asoc = q->asoc;
555 lqueue = &q->retransmit; 553 lqueue = &q->retransmit;
556 fast_rtx = q->fast_rtx; 554 fast_rtx = q->fast_rtx;
557 555
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e58f9476f29c..4e55e6c49ec9 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -491,9 +491,9 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
491 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", 491 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
492 __func__, &fl.fl4_dst, &fl.fl4_src); 492 __func__, &fl.fl4_dst, &fl.fl4_src);
493 493
494 if (!ip_route_output_key(&init_net, &rt, &fl)) { 494 rt = ip_route_output_key(&init_net, &fl);
495 if (!IS_ERR(rt))
495 dst = &rt->dst; 496 dst = &rt->dst;
496 }
497 497
498 /* If there is no association or if a source address is passed, no 498 /* If there is no association or if a source address is passed, no
499 * more validation is required. 499 * more validation is required.
@@ -535,7 +535,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
535 (AF_INET == laddr->a.sa.sa_family)) { 535 (AF_INET == laddr->a.sa.sa_family)) {
536 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 536 fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
537 fl.fl_ip_sport = laddr->a.v4.sin_port; 537 fl.fl_ip_sport = laddr->a.v4.sin_port;
538 if (!ip_route_output_key(&init_net, &rt, &fl)) { 538 rt = ip_route_output_key(&init_net, &fl);
539 if (!IS_ERR(rt)) {
539 dst = &rt->dst; 540 dst = &rt->dst;
540 goto out_unlock; 541 goto out_unlock;
541 } 542 }
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b23428f3c0dd..de98665db524 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3375,7 +3375,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
3375 struct sctp_fwdtsn_skip *skiplist) 3375 struct sctp_fwdtsn_skip *skiplist)
3376{ 3376{
3377 struct sctp_chunk *retval = NULL; 3377 struct sctp_chunk *retval = NULL;
3378 struct sctp_fwdtsn_chunk *ftsn_chunk;
3379 struct sctp_fwdtsn_hdr ftsn_hdr; 3378 struct sctp_fwdtsn_hdr ftsn_hdr;
3380 struct sctp_fwdtsn_skip skip; 3379 struct sctp_fwdtsn_skip skip;
3381 size_t hint; 3380 size_t hint;
@@ -3388,8 +3387,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
3388 if (!retval) 3387 if (!retval)
3389 return NULL; 3388 return NULL;
3390 3389
3391 ftsn_chunk = (struct sctp_fwdtsn_chunk *)retval->subh.fwdtsn_hdr;
3392
3393 ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); 3390 ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
3394 retval->subh.fwdtsn_hdr = 3391 retval->subh.fwdtsn_hdr =
3395 sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); 3392 sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b53b2ebbb198..3951a10605bc 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2928,7 +2928,6 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
2928 unsigned int optlen) 2928 unsigned int optlen)
2929{ 2929{
2930 struct sctp_sock *sp; 2930 struct sctp_sock *sp;
2931 struct sctp_endpoint *ep;
2932 struct sctp_association *asoc = NULL; 2931 struct sctp_association *asoc = NULL;
2933 struct sctp_setpeerprim prim; 2932 struct sctp_setpeerprim prim;
2934 struct sctp_chunk *chunk; 2933 struct sctp_chunk *chunk;
@@ -2936,7 +2935,6 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
2936 int err; 2935 int err;
2937 2936
2938 sp = sctp_sk(sk); 2937 sp = sctp_sk(sk);
2939 ep = sp->ep;
2940 2938
2941 if (!sctp_addip_enable) 2939 if (!sctp_addip_enable)
2942 return -EPERM; 2940 return -EPERM;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index c7f7e49609cb..17678189d054 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -105,11 +105,8 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
105 gfp_t gfp) 105 gfp_t gfp)
106{ 106{
107 struct sk_buff_head temp; 107 struct sk_buff_head temp;
108 sctp_data_chunk_t *hdr;
109 struct sctp_ulpevent *event; 108 struct sctp_ulpevent *event;
110 109
111 hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
112
113 /* Create an event from the incoming chunk. */ 110 /* Create an event from the incoming chunk. */
114 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); 111 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
115 if (!event) 112 if (!event)
@@ -743,11 +740,9 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
743 struct sk_buff *pos, *tmp; 740 struct sk_buff *pos, *tmp;
744 struct sctp_ulpevent *cevent; 741 struct sctp_ulpevent *cevent;
745 struct sctp_stream *in; 742 struct sctp_stream *in;
746 __u16 sid, csid; 743 __u16 sid, csid, cssn;
747 __u16 ssn, cssn;
748 744
749 sid = event->stream; 745 sid = event->stream;
750 ssn = event->ssn;
751 in = &ulpq->asoc->ssnmap->in; 746 in = &ulpq->asoc->ssnmap->in;
752 747
753 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev; 748 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 217fb7f34d52..df5997d25826 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1567,7 +1567,6 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1567 struct sock_iocb *siocb = kiocb_to_siocb(kiocb); 1567 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1568 struct sock *sk = sock->sk; 1568 struct sock *sk = sock->sk;
1569 struct sock *other = NULL; 1569 struct sock *other = NULL;
1570 struct sockaddr_un *sunaddr = msg->msg_name;
1571 int err, size; 1570 int err, size;
1572 struct sk_buff *skb; 1571 struct sk_buff *skb;
1573 int sent = 0; 1572 int sent = 0;
@@ -1590,7 +1589,6 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1590 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; 1589 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1591 goto out_err; 1590 goto out_err;
1592 } else { 1591 } else {
1593 sunaddr = NULL;
1594 err = -ENOTCONN; 1592 err = -ENOTCONN;
1595 other = unix_peer(sk); 1593 other = unix_peer(sk);
1596 if (!other) 1594 if (!other)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 864ddfbeff2f..4ebce4284e9d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1968,13 +1968,41 @@ static int parse_station_flags(struct genl_info *info,
1968 return 0; 1968 return 0;
1969} 1969}
1970 1970
1971static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
1972 int attr)
1973{
1974 struct nlattr *rate;
1975 u16 bitrate;
1976
1977 rate = nla_nest_start(msg, attr);
1978 if (!rate)
1979 goto nla_put_failure;
1980
1981 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
1982 bitrate = cfg80211_calculate_bitrate(info);
1983 if (bitrate > 0)
1984 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
1985
1986 if (info->flags & RATE_INFO_FLAGS_MCS)
1987 NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
1988 if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
1989 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
1990 if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
1991 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
1992
1993 nla_nest_end(msg, rate);
1994 return true;
1995
1996nla_put_failure:
1997 return false;
1998}
1999
1971static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 2000static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1972 int flags, struct net_device *dev, 2001 int flags, struct net_device *dev,
1973 const u8 *mac_addr, struct station_info *sinfo) 2002 const u8 *mac_addr, struct station_info *sinfo)
1974{ 2003{
1975 void *hdr; 2004 void *hdr;
1976 struct nlattr *sinfoattr, *txrate; 2005 struct nlattr *sinfoattr;
1977 u16 bitrate;
1978 2006
1979 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); 2007 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
1980 if (!hdr) 2008 if (!hdr)
@@ -2013,24 +2041,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2013 NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, 2041 NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
2014 sinfo->signal_avg); 2042 sinfo->signal_avg);
2015 if (sinfo->filled & STATION_INFO_TX_BITRATE) { 2043 if (sinfo->filled & STATION_INFO_TX_BITRATE) {
2016 txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE); 2044 if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
2017 if (!txrate) 2045 NL80211_STA_INFO_TX_BITRATE))
2046 goto nla_put_failure;
2047 }
2048 if (sinfo->filled & STATION_INFO_RX_BITRATE) {
2049 if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
2050 NL80211_STA_INFO_RX_BITRATE))
2018 goto nla_put_failure; 2051 goto nla_put_failure;
2019
2020 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
2021 bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
2022 if (bitrate > 0)
2023 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
2024
2025 if (sinfo->txrate.flags & RATE_INFO_FLAGS_MCS)
2026 NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS,
2027 sinfo->txrate.mcs);
2028 if (sinfo->txrate.flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
2029 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
2030 if (sinfo->txrate.flags & RATE_INFO_FLAGS_SHORT_GI)
2031 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
2032
2033 nla_nest_end(msg, txrate);
2034 } 2052 }
2035 if (sinfo->filled & STATION_INFO_RX_PACKETS) 2053 if (sinfo->filled & STATION_INFO_RX_PACKETS)
2036 NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS, 2054 NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 7f1f4ec49041..0bf169bb770e 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -805,11 +805,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
805 return freq; 805 return freq;
806 if (freq == 0) 806 if (freq == 0)
807 return -EINVAL; 807 return -EINVAL;
808 wdev_lock(wdev);
809 mutex_lock(&rdev->devlist_mtx); 808 mutex_lock(&rdev->devlist_mtx);
809 wdev_lock(wdev);
810 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); 810 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
811 mutex_unlock(&rdev->devlist_mtx);
812 wdev_unlock(wdev); 811 wdev_unlock(wdev);
812 mutex_unlock(&rdev->devlist_mtx);
813 return err; 813 return err;
814 default: 814 default:
815 return -EOPNOTSUPP; 815 return -EOPNOTSUPP;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 41a91d27d3ea..b1932a629ef8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1735,19 +1735,36 @@ error:
1735 return ERR_PTR(err); 1735 return ERR_PTR(err);
1736} 1736}
1737 1737
1738static struct dst_entry *make_blackhole(struct net *net, u16 family,
1739 struct dst_entry *dst_orig)
1740{
1741 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1742 struct dst_entry *ret;
1743
1744 if (!afinfo) {
1745 dst_release(dst_orig);
1746 ret = ERR_PTR(-EINVAL);
1747 } else {
1748 ret = afinfo->blackhole_route(net, dst_orig);
1749 }
1750 xfrm_policy_put_afinfo(afinfo);
1751
1752 return ret;
1753}
1754
1738/* Main function: finds/creates a bundle for given flow. 1755/* Main function: finds/creates a bundle for given flow.
1739 * 1756 *
1740 * At the moment we eat a raw IP route. Mostly to speed up lookups 1757 * At the moment we eat a raw IP route. Mostly to speed up lookups
1741 * on interfaces with disabled IPsec. 1758 * on interfaces with disabled IPsec.
1742 */ 1759 */
1743int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, 1760struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
1744 const struct flowi *fl, 1761 const struct flowi *fl,
1745 struct sock *sk, int flags) 1762 struct sock *sk, int flags)
1746{ 1763{
1747 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1764 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1748 struct flow_cache_object *flo; 1765 struct flow_cache_object *flo;
1749 struct xfrm_dst *xdst; 1766 struct xfrm_dst *xdst;
1750 struct dst_entry *dst, *dst_orig = *dst_p, *route; 1767 struct dst_entry *dst, *route;
1751 u16 family = dst_orig->ops->family; 1768 u16 family = dst_orig->ops->family;
1752 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1769 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1753 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 1770 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
@@ -1829,9 +1846,10 @@ restart:
1829 dst_release(dst); 1846 dst_release(dst);
1830 xfrm_pols_put(pols, drop_pols); 1847 xfrm_pols_put(pols, drop_pols);
1831 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1848 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1832 return -EREMOTE; 1849
1850 return make_blackhole(net, family, dst_orig);
1833 } 1851 }
1834 if (flags & XFRM_LOOKUP_WAIT) { 1852 if (fl->flags & FLOWI_FLAG_CAN_SLEEP) {
1835 DECLARE_WAITQUEUE(wait, current); 1853 DECLARE_WAITQUEUE(wait, current);
1836 1854
1837 add_wait_queue(&net->xfrm.km_waitq, &wait); 1855 add_wait_queue(&net->xfrm.km_waitq, &wait);
@@ -1873,43 +1891,28 @@ no_transform:
1873 goto error; 1891 goto error;
1874 } else if (num_xfrms > 0) { 1892 } else if (num_xfrms > 0) {
1875 /* Flow transformed */ 1893 /* Flow transformed */
1876 *dst_p = dst;
1877 dst_release(dst_orig); 1894 dst_release(dst_orig);
1878 } else { 1895 } else {
1879 /* Flow passes untransformed */ 1896 /* Flow passes untransformed */
1880 dst_release(dst); 1897 dst_release(dst);
1898 dst = dst_orig;
1881 } 1899 }
1882ok: 1900ok:
1883 xfrm_pols_put(pols, drop_pols); 1901 xfrm_pols_put(pols, drop_pols);
1884 return 0; 1902 return dst;
1885 1903
1886nopol: 1904nopol:
1887 if (!(flags & XFRM_LOOKUP_ICMP)) 1905 if (!(flags & XFRM_LOOKUP_ICMP)) {
1906 dst = dst_orig;
1888 goto ok; 1907 goto ok;
1908 }
1889 err = -ENOENT; 1909 err = -ENOENT;
1890error: 1910error:
1891 dst_release(dst); 1911 dst_release(dst);
1892dropdst: 1912dropdst:
1893 dst_release(dst_orig); 1913 dst_release(dst_orig);
1894 *dst_p = NULL;
1895 xfrm_pols_put(pols, drop_pols); 1914 xfrm_pols_put(pols, drop_pols);
1896 return err; 1915 return ERR_PTR(err);
1897}
1898EXPORT_SYMBOL(__xfrm_lookup);
1899
1900int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
1901 const struct flowi *fl,
1902 struct sock *sk, int flags)
1903{
1904 int err = __xfrm_lookup(net, dst_p, fl, sk, flags);
1905
1906 if (err == -EREMOTE) {
1907 dst_release(*dst_p);
1908 *dst_p = NULL;
1909 err = -EAGAIN;
1910 }
1911
1912 return err;
1913} 1916}
1914EXPORT_SYMBOL(xfrm_lookup); 1917EXPORT_SYMBOL(xfrm_lookup);
1915 1918
@@ -2169,7 +2172,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2169 struct net *net = dev_net(skb->dev); 2172 struct net *net = dev_net(skb->dev);
2170 struct flowi fl; 2173 struct flowi fl;
2171 struct dst_entry *dst; 2174 struct dst_entry *dst;
2172 int res; 2175 int res = 0;
2173 2176
2174 if (xfrm_decode_session(skb, &fl, family) < 0) { 2177 if (xfrm_decode_session(skb, &fl, family) < 0) {
2175 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 2178 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
@@ -2177,9 +2180,12 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2177 } 2180 }
2178 2181
2179 skb_dst_force(skb); 2182 skb_dst_force(skb);
2180 dst = skb_dst(skb);
2181 2183
2182 res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; 2184 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2185 if (IS_ERR(dst)) {
2186 res = 1;
2187 dst = NULL;
2188 }
2183 skb_dst_set(skb, dst); 2189 skb_dst_set(skb, dst);
2184 return res; 2190 return res;
2185} 2191}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 673698d380d7..468ab60d3dc0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -497,9 +497,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
497 struct xfrm_state *x; 497 struct xfrm_state *x;
498 int err; 498 int err;
499 struct km_event c; 499 struct km_event c;
500 uid_t loginuid = NETLINK_CB(skb).loginuid; 500 uid_t loginuid = audit_get_loginuid(current);
501 u32 sessionid = NETLINK_CB(skb).sessionid; 501 u32 sessionid = audit_get_sessionid(current);
502 u32 sid = NETLINK_CB(skb).sid; 502 u32 sid;
503 503
504 err = verify_newsa_info(p, attrs); 504 err = verify_newsa_info(p, attrs);
505 if (err) 505 if (err)
@@ -515,6 +515,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
515 else 515 else
516 err = xfrm_state_update(x); 516 err = xfrm_state_update(x);
517 517
518 security_task_getsecid(current, &sid);
518 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid); 519 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
519 520
520 if (err < 0) { 521 if (err < 0) {
@@ -575,9 +576,9 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
575 int err = -ESRCH; 576 int err = -ESRCH;
576 struct km_event c; 577 struct km_event c;
577 struct xfrm_usersa_id *p = nlmsg_data(nlh); 578 struct xfrm_usersa_id *p = nlmsg_data(nlh);
578 uid_t loginuid = NETLINK_CB(skb).loginuid; 579 uid_t loginuid = audit_get_loginuid(current);
579 u32 sessionid = NETLINK_CB(skb).sessionid; 580 u32 sessionid = audit_get_sessionid(current);
580 u32 sid = NETLINK_CB(skb).sid; 581 u32 sid;
581 582
582 x = xfrm_user_state_lookup(net, p, attrs, &err); 583 x = xfrm_user_state_lookup(net, p, attrs, &err);
583 if (x == NULL) 584 if (x == NULL)
@@ -602,6 +603,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
602 km_state_notify(x, &c); 603 km_state_notify(x, &c);
603 604
604out: 605out:
606 security_task_getsecid(current, &sid);
605 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid); 607 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
606 xfrm_state_put(x); 608 xfrm_state_put(x);
607 return err; 609 return err;
@@ -1265,9 +1267,9 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1265 struct km_event c; 1267 struct km_event c;
1266 int err; 1268 int err;
1267 int excl; 1269 int excl;
1268 uid_t loginuid = NETLINK_CB(skb).loginuid; 1270 uid_t loginuid = audit_get_loginuid(current);
1269 u32 sessionid = NETLINK_CB(skb).sessionid; 1271 u32 sessionid = audit_get_sessionid(current);
1270 u32 sid = NETLINK_CB(skb).sid; 1272 u32 sid;
1271 1273
1272 err = verify_newpolicy_info(p); 1274 err = verify_newpolicy_info(p);
1273 if (err) 1275 if (err)
@@ -1286,6 +1288,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1286 * a type XFRM_MSG_UPDPOLICY - JHS */ 1288 * a type XFRM_MSG_UPDPOLICY - JHS */
1287 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; 1289 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1288 err = xfrm_policy_insert(p->dir, xp, excl); 1290 err = xfrm_policy_insert(p->dir, xp, excl);
1291 security_task_getsecid(current, &sid);
1289 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid); 1292 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1290 1293
1291 if (err) { 1294 if (err) {
@@ -1522,10 +1525,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1522 NETLINK_CB(skb).pid); 1525 NETLINK_CB(skb).pid);
1523 } 1526 }
1524 } else { 1527 } else {
1525 uid_t loginuid = NETLINK_CB(skb).loginuid; 1528 uid_t loginuid = audit_get_loginuid(current);
1526 u32 sessionid = NETLINK_CB(skb).sessionid; 1529 u32 sessionid = audit_get_sessionid(current);
1527 u32 sid = NETLINK_CB(skb).sid; 1530 u32 sid;
1528 1531
1532 security_task_getsecid(current, &sid);
1529 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid, 1533 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1530 sid); 1534 sid);
1531 1535
@@ -1553,9 +1557,9 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1553 struct xfrm_audit audit_info; 1557 struct xfrm_audit audit_info;
1554 int err; 1558 int err;
1555 1559
1556 audit_info.loginuid = NETLINK_CB(skb).loginuid; 1560 audit_info.loginuid = audit_get_loginuid(current);
1557 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1561 audit_info.sessionid = audit_get_sessionid(current);
1558 audit_info.secid = NETLINK_CB(skb).sid; 1562 security_task_getsecid(current, &audit_info.secid);
1559 err = xfrm_state_flush(net, p->proto, &audit_info); 1563 err = xfrm_state_flush(net, p->proto, &audit_info);
1560 if (err) { 1564 if (err) {
1561 if (err == -ESRCH) /* empty table */ 1565 if (err == -ESRCH) /* empty table */
@@ -1720,9 +1724,9 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1720 if (err) 1724 if (err)
1721 return err; 1725 return err;
1722 1726
1723 audit_info.loginuid = NETLINK_CB(skb).loginuid; 1727 audit_info.loginuid = audit_get_loginuid(current);
1724 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1728 audit_info.sessionid = audit_get_sessionid(current);
1725 audit_info.secid = NETLINK_CB(skb).sid; 1729 security_task_getsecid(current, &audit_info.secid);
1726 err = xfrm_policy_flush(net, type, &audit_info); 1730 err = xfrm_policy_flush(net, type, &audit_info);
1727 if (err) { 1731 if (err) {
1728 if (err == -ESRCH) /* empty table */ 1732 if (err == -ESRCH) /* empty table */
@@ -1789,9 +1793,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1789 1793
1790 err = 0; 1794 err = 0;
1791 if (up->hard) { 1795 if (up->hard) {
1792 uid_t loginuid = NETLINK_CB(skb).loginuid; 1796 uid_t loginuid = audit_get_loginuid(current);
1793 uid_t sessionid = NETLINK_CB(skb).sessionid; 1797 u32 sessionid = audit_get_sessionid(current);
1794 u32 sid = NETLINK_CB(skb).sid; 1798 u32 sid;
1799
1800 security_task_getsecid(current, &sid);
1795 xfrm_policy_delete(xp, p->dir); 1801 xfrm_policy_delete(xp, p->dir);
1796 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid); 1802 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1797 1803
@@ -1830,9 +1836,11 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1830 km_state_expired(x, ue->hard, current->pid); 1836 km_state_expired(x, ue->hard, current->pid);
1831 1837
1832 if (ue->hard) { 1838 if (ue->hard) {
1833 uid_t loginuid = NETLINK_CB(skb).loginuid; 1839 uid_t loginuid = audit_get_loginuid(current);
1834 uid_t sessionid = NETLINK_CB(skb).sessionid; 1840 u32 sessionid = audit_get_sessionid(current);
1835 u32 sid = NETLINK_CB(skb).sid; 1841 u32 sid;
1842
1843 security_task_getsecid(current, &sid);
1836 __xfrm_state_delete(x); 1844 __xfrm_state_delete(x);
1837 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid); 1845 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1838 } 1846 }
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index c9a16abacab4..6c94c6ce2925 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -315,6 +315,7 @@ static void parse_dep_file(void *map, size_t len)
315 char *end = m + len; 315 char *end = m + len;
316 char *p; 316 char *p;
317 char s[PATH_MAX]; 317 char s[PATH_MAX];
318 int first;
318 319
319 p = strchr(m, ':'); 320 p = strchr(m, ':');
320 if (!p) { 321 if (!p) {
@@ -327,6 +328,7 @@ static void parse_dep_file(void *map, size_t len)
327 328
328 clear_config(); 329 clear_config();
329 330
331 first = 1;
330 while (m < end) { 332 while (m < end) {
331 while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) 333 while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
332 m++; 334 m++;
@@ -340,9 +342,17 @@ static void parse_dep_file(void *map, size_t len)
340 if (strrcmp(s, "include/generated/autoconf.h") && 342 if (strrcmp(s, "include/generated/autoconf.h") &&
341 strrcmp(s, "arch/um/include/uml-config.h") && 343 strrcmp(s, "arch/um/include/uml-config.h") &&
342 strrcmp(s, ".ver")) { 344 strrcmp(s, ".ver")) {
343 printf(" %s \\\n", s); 345 /*
346 * Do not output the first dependency (the
347 * source file), so that kbuild is not confused
348 * if a .c file is rewritten into .S or vice
349 * versa.
350 */
351 if (!first)
352 printf(" %s \\\n", s);
344 do_config_file(s); 353 do_config_file(s);
345 } 354 }
355 first = 0;
346 m = p + 1; 356 m = p + 1;
347 } 357 }
348 printf("\n%s: $(deps_%s)\n\n", target, target); 358 printf("\n%s: $(deps_%s)\n\n", target, target);
diff --git a/security/commoncap.c b/security/commoncap.c
index 64c2ed9c9015..a83e607d91c3 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -52,13 +52,12 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
52 52
53int cap_netlink_send(struct sock *sk, struct sk_buff *skb) 53int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
54{ 54{
55 NETLINK_CB(skb).eff_cap = current_cap();
56 return 0; 55 return 0;
57} 56}
58 57
59int cap_netlink_recv(struct sk_buff *skb, int cap) 58int cap_netlink_recv(struct sk_buff *skb, int cap)
60{ 59{
61 if (!cap_raised(NETLINK_CB(skb).eff_cap, cap)) 60 if (!cap_raised(current_cap(), cap))
62 return -EPERM; 61 return -EPERM;
63 return 0; 62 return 0;
64} 63}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c8d699270687..cef42f5d69a2 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4669,6 +4669,7 @@ static int selinux_netlink_recv(struct sk_buff *skb, int capability)
4669{ 4669{
4670 int err; 4670 int err;
4671 struct common_audit_data ad; 4671 struct common_audit_data ad;
4672 u32 sid;
4672 4673
4673 err = cap_netlink_recv(skb, capability); 4674 err = cap_netlink_recv(skb, capability);
4674 if (err) 4675 if (err)
@@ -4677,8 +4678,9 @@ static int selinux_netlink_recv(struct sk_buff *skb, int capability)
4677 COMMON_AUDIT_DATA_INIT(&ad, CAP); 4678 COMMON_AUDIT_DATA_INIT(&ad, CAP);
4678 ad.u.cap = capability; 4679 ad.u.cap = capability;
4679 4680
4680 return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid, 4681 security_task_getsecid(current, &sid);
4681 SECCLASS_CAPABILITY, CAP_TO_MASK(capability), &ad); 4682 return avc_has_perm(sid, sid, SECCLASS_CAPABILITY,
4683 CAP_TO_MASK(capability), &ad);
4682} 4684}
4683 4685
4684static int ipc_alloc_security(struct task_struct *task, 4686static int ipc_alloc_security(struct task_struct *task,
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 4902ae568730..53b53e97c896 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
141 141
142fail_input: 142fail_input:
143 input_free_device(jack->input_dev); 143 input_free_device(jack->input_dev);
144 kfree(jack->id);
144 kfree(jack); 145 kfree(jack);
145 return err; 146 return err;
146} 147}
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 23f49f356e0f..16c0bdfbb164 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) {
1252static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) 1252static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
1253{ 1253{
1254 stream_t *dma = &vortex->dma_adb[adbdma]; 1254 stream_t *dma = &vortex->dma_adb[adbdma];
1255 int temp; 1255 int temp, page, delta;
1256 1256
1257 temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); 1257 temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2));
1258 temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); 1258 page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
1259 return temp; 1259 if (dma->nr_periods >= 4)
1260 delta = (page - dma->period_real) & 3;
1261 else {
1262 delta = (page - dma->period_real);
1263 if (delta < 0)
1264 delta += dma->nr_periods;
1265 }
1266 return (dma->period_virt + delta) * dma->period_bytes
1267 + (temp & (dma->period_bytes - 1));
1260} 1268}
1261 1269
1262static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) 1270static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 0baffcdee8f9..fcedad9a5fef 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2308 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), 2308 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
2309 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), 2309 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
2310 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), 2310 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
2311 SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
2311 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), 2312 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
2312 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), 2313 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
2313 SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), 2314 SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index fbe97d32140d..4d5004e693f0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3114 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), 3114 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
3115 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), 3115 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
3116 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 3116 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
3117 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
3118 SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
3117 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), 3119 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
3118 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), 3120 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
3119 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), 3121 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3410,7 +3412,7 @@ static void cx_auto_parse_output(struct hda_codec *codec)
3410 } 3412 }
3411 } 3413 }
3412 spec->multiout.dac_nids = spec->private_dac_nids; 3414 spec->multiout.dac_nids = spec->private_dac_nids;
3413 spec->multiout.max_channels = nums * 2; 3415 spec->multiout.max_channels = spec->multiout.num_dacs * 2;
3414 3416
3415 if (cfg->hp_outs > 0) 3417 if (cfg->hp_outs > 0)
3416 spec->auto_mute = 1; 3418 spec->auto_mute = 1;
@@ -3729,9 +3731,9 @@ static int cx_auto_init(struct hda_codec *codec)
3729 return 0; 3731 return 0;
3730} 3732}
3731 3733
3732static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, 3734static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
3733 const char *dir, int cidx, 3735 const char *dir, int cidx,
3734 hda_nid_t nid, int hda_dir) 3736 hda_nid_t nid, int hda_dir, int amp_idx)
3735{ 3737{
3736 static char name[32]; 3738 static char name[32];
3737 static struct snd_kcontrol_new knew[] = { 3739 static struct snd_kcontrol_new knew[] = {
@@ -3743,7 +3745,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
3743 3745
3744 for (i = 0; i < 2; i++) { 3746 for (i = 0; i < 2; i++) {
3745 struct snd_kcontrol *kctl; 3747 struct snd_kcontrol *kctl;
3746 knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir); 3748 knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
3749 hda_dir);
3747 knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; 3750 knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
3748 knew[i].index = cidx; 3751 knew[i].index = cidx;
3749 snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); 3752 snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]);
@@ -3759,6 +3762,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
3759 return 0; 3762 return 0;
3760} 3763}
3761 3764
3765#define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \
3766 cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
3767
3762#define cx_auto_add_pb_volume(codec, nid, str, idx) \ 3768#define cx_auto_add_pb_volume(codec, nid, str, idx) \
3763 cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) 3769 cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
3764 3770
@@ -3808,29 +3814,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
3808 struct conexant_spec *spec = codec->spec; 3814 struct conexant_spec *spec = codec->spec;
3809 struct auto_pin_cfg *cfg = &spec->autocfg; 3815 struct auto_pin_cfg *cfg = &spec->autocfg;
3810 static const char *prev_label; 3816 static const char *prev_label;
3811 int i, err, cidx; 3817 int i, err, cidx, conn_len;
3818 hda_nid_t conn[HDA_MAX_CONNECTIONS];
3819
3820 int multi_adc_volume = 0; /* If the ADC nid has several input volumes */
3821 int adc_nid = spec->adc_nids[0];
3822
3823 conn_len = snd_hda_get_connections(codec, adc_nid, conn,
3824 HDA_MAX_CONNECTIONS);
3825 if (conn_len < 0)
3826 return conn_len;
3827
3828 multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1;
3829 if (!multi_adc_volume) {
3830 err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid,
3831 HDA_INPUT);
3832 if (err < 0)
3833 return err;
3834 }
3812 3835
3813 err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0],
3814 HDA_INPUT);
3815 if (err < 0)
3816 return err;
3817 prev_label = NULL; 3836 prev_label = NULL;
3818 cidx = 0; 3837 cidx = 0;
3819 for (i = 0; i < cfg->num_inputs; i++) { 3838 for (i = 0; i < cfg->num_inputs; i++) {
3820 hda_nid_t nid = cfg->inputs[i].pin; 3839 hda_nid_t nid = cfg->inputs[i].pin;
3821 const char *label; 3840 const char *label;
3822 if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) 3841 int j;
3842 int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP;
3843 if (!pin_amp && !multi_adc_volume)
3823 continue; 3844 continue;
3845
3824 label = hda_get_autocfg_input_label(codec, cfg, i); 3846 label = hda_get_autocfg_input_label(codec, cfg, i);
3825 if (label == prev_label) 3847 if (label == prev_label)
3826 cidx++; 3848 cidx++;
3827 else 3849 else
3828 cidx = 0; 3850 cidx = 0;
3829 prev_label = label; 3851 prev_label = label;
3830 err = cx_auto_add_volume(codec, label, " Capture", cidx, 3852
3831 nid, HDA_INPUT); 3853 if (pin_amp) {
3832 if (err < 0) 3854 err = cx_auto_add_volume(codec, label, " Boost", cidx,
3833 return err; 3855 nid, HDA_INPUT);
3856 if (err < 0)
3857 return err;
3858 }
3859
3860 if (!multi_adc_volume)
3861 continue;
3862 for (j = 0; j < conn_len; j++) {
3863 if (conn[j] == nid) {
3864 err = cx_auto_add_volume_idx(codec, label,
3865 " Capture", cidx, adc_nid, HDA_INPUT, j);
3866 if (err < 0)
3867 return err;
3868 break;
3869 }
3870 }
3834 } 3871 }
3835 return 0; 3872 return 0;
3836} 3873}
@@ -3902,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
3902 .patch = patch_cxt5066 }, 3939 .patch = patch_cxt5066 },
3903 { .id = 0x14f15069, .name = "CX20585", 3940 { .id = 0x14f15069, .name = "CX20585",
3904 .patch = patch_cxt5066 }, 3941 .patch = patch_cxt5066 },
3942 { .id = 0x14f1506e, .name = "CX20590",
3943 .patch = patch_cxt5066 },
3905 { .id = 0x14f15097, .name = "CX20631", 3944 { .id = 0x14f15097, .name = "CX20631",
3906 .patch = patch_conexant_auto }, 3945 .patch = patch_conexant_auto },
3907 { .id = 0x14f15098, .name = "CX20632", 3946 { .id = 0x14f15098, .name = "CX20632",
@@ -3928,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
3928MODULE_ALIAS("snd-hda-codec-id:14f15067"); 3967MODULE_ALIAS("snd-hda-codec-id:14f15067");
3929MODULE_ALIAS("snd-hda-codec-id:14f15068"); 3968MODULE_ALIAS("snd-hda-codec-id:14f15068");
3930MODULE_ALIAS("snd-hda-codec-id:14f15069"); 3969MODULE_ALIAS("snd-hda-codec-id:14f15069");
3970MODULE_ALIAS("snd-hda-codec-id:14f1506e");
3931MODULE_ALIAS("snd-hda-codec-id:14f15097"); 3971MODULE_ALIAS("snd-hda-codec-id:14f15097");
3932MODULE_ALIAS("snd-hda-codec-id:14f15098"); 3972MODULE_ALIAS("snd-hda-codec-id:14f15098");
3933MODULE_ALIAS("snd-hda-codec-id:14f150a1"); 3973MODULE_ALIAS("snd-hda-codec-id:14f150a1");
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9ea48b425d0b..bd7b123f6440 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = {
586 0x0f, 0x10, 0x11, 0x1f, 0x20, 586 0x0f, 0x10, 0x11, 0x1f, 0x20,
587}; 587};
588 588
589static hda_nid_t stac92hd88xxx_pin_nids[10] = { 589static hda_nid_t stac92hd87xxx_pin_nids[6] = {
590 0x0a, 0x0b, 0x0c, 0x0d,
591 0x0f, 0x11,
592};
593
594static hda_nid_t stac92hd88xxx_pin_nids[8] = {
590 0x0a, 0x0b, 0x0c, 0x0d, 595 0x0a, 0x0b, 0x0c, 0x0d,
591 0x0f, 0x11, 0x1f, 0x20, 596 0x0f, 0x11, 0x1f, 0x20,
592}; 597};
@@ -5430,12 +5435,13 @@ again:
5430 switch (codec->vendor_id) { 5435 switch (codec->vendor_id) {
5431 case 0x111d76d1: 5436 case 0x111d76d1:
5432 case 0x111d76d9: 5437 case 0x111d76d9:
5438 case 0x111d76e5:
5433 spec->dmic_nids = stac92hd87b_dmic_nids; 5439 spec->dmic_nids = stac92hd87b_dmic_nids;
5434 spec->num_dmics = stac92xx_connected_ports(codec, 5440 spec->num_dmics = stac92xx_connected_ports(codec,
5435 stac92hd87b_dmic_nids, 5441 stac92hd87b_dmic_nids,
5436 STAC92HD87B_NUM_DMICS); 5442 STAC92HD87B_NUM_DMICS);
5437 spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); 5443 spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
5438 spec->pin_nids = stac92hd88xxx_pin_nids; 5444 spec->pin_nids = stac92hd87xxx_pin_nids;
5439 spec->mono_nid = 0; 5445 spec->mono_nid = 0;
5440 spec->num_pwrs = 0; 5446 spec->num_pwrs = 0;
5441 break; 5447 break;
@@ -5443,6 +5449,7 @@ again:
5443 case 0x111d7667: 5449 case 0x111d7667:
5444 case 0x111d7668: 5450 case 0x111d7668:
5445 case 0x111d7669: 5451 case 0x111d7669:
5452 case 0x111d76e3:
5446 spec->num_dmics = stac92xx_connected_ports(codec, 5453 spec->num_dmics = stac92xx_connected_ports(codec,
5447 stac92hd88xxx_dmic_nids, 5454 stac92hd88xxx_dmic_nids,
5448 STAC92HD88XXX_NUM_DMICS); 5455 STAC92HD88XXX_NUM_DMICS);
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
6387 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, 6394 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
6388 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, 6395 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
6389 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, 6396 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
6397 { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
6398 { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
6390 { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, 6399 { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
6391 {} /* terminator */ 6400 {} /* terminator */
6392}; 6401};
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index a76c3260d941..63b0054200a8 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
567 hda_nid_t nid = cfg->inputs[i].pin; 567 hda_nid_t nid = cfg->inputs[i].pin;
568 if (spec->smart51_enabled && is_smart51_pins(spec, nid)) 568 if (spec->smart51_enabled && is_smart51_pins(spec, nid))
569 ctl = PIN_OUT; 569 ctl = PIN_OUT;
570 else if (i == AUTO_PIN_MIC) 570 else if (cfg->inputs[i].type == AUTO_PIN_MIC)
571 ctl = PIN_VREF50; 571 ctl = PIN_VREF50;
572 else 572 else
573 ctl = PIN_IN; 573 ctl = PIN_IN;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index bb4bf65b9e7e..0bb424af956f 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
367 return 0; 367 return 0;
368} 368}
369 369
370static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC; 370static const u8 cx20442_reg;
371 371
372static struct snd_soc_codec_driver cx20442_codec_dev = { 372static struct snd_soc_codec_driver cx20442_codec_dev = {
373 .probe = cx20442_codec_probe, 373 .probe = cx20442_codec_probe,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 987476a5895f..017d99ceb42e 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
1482 WM8903_MICDET_EINT | WM8903_MICSHRT_EINT, 1482 WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
1483 irq_mask); 1483 irq_mask);
1484 1484
1485 if (det && shrt) { 1485 if (det || shrt) {
1486 /* Enable mic detection, this may not have been set through 1486 /* Enable mic detection, this may not have been set through
1487 * platform data (eg, if the defaults are OK). */ 1487 * platform data (eg, if the defaults are OK). */
1488 snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, 1488 snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index e8490f3edd03..e3ec2433b215 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
165 165
166#define WM8903_VMID_RES_50K 2 166#define WM8903_VMID_RES_50K 2
167#define WM8903_VMID_RES_250K 3 167#define WM8903_VMID_RES_250K 3
168#define WM8903_VMID_RES_5K 4 168#define WM8903_VMID_RES_5K 6
169 169
170/* 170/*
171 * R8 (0x08) - Analogue DAC 0 171 * R8 (0x08) - Analogue DAC 0
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 37b8aa8a680f..ebaee5ca7434 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -107,6 +107,9 @@ struct wm8994_priv {
107 107
108 int revision; 108 int revision;
109 struct wm8994_pdata *pdata; 109 struct wm8994_pdata *pdata;
110
111 unsigned int aif1clk_enable:1;
112 unsigned int aif2clk_enable:1;
110}; 113};
111 114
112static int wm8994_readable(unsigned int reg) 115static int wm8994_readable(unsigned int reg)
@@ -1004,6 +1007,93 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
1004 } 1007 }
1005} 1008}
1006 1009
1010static int late_enable_ev(struct snd_soc_dapm_widget *w,
1011 struct snd_kcontrol *kcontrol, int event)
1012{
1013 struct snd_soc_codec *codec = w->codec;
1014 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1015
1016 switch (event) {
1017 case SND_SOC_DAPM_PRE_PMU:
1018 if (wm8994->aif1clk_enable)
1019 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1020 WM8994_AIF1CLK_ENA_MASK,
1021 WM8994_AIF1CLK_ENA);
1022 if (wm8994->aif2clk_enable)
1023 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1024 WM8994_AIF2CLK_ENA_MASK,
1025 WM8994_AIF2CLK_ENA);
1026 break;
1027 }
1028
1029 return 0;
1030}
1031
1032static int late_disable_ev(struct snd_soc_dapm_widget *w,
1033 struct snd_kcontrol *kcontrol, int event)
1034{
1035 struct snd_soc_codec *codec = w->codec;
1036 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1037
1038 switch (event) {
1039 case SND_SOC_DAPM_POST_PMD:
1040 if (wm8994->aif1clk_enable) {
1041 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1042 WM8994_AIF1CLK_ENA_MASK, 0);
1043 wm8994->aif1clk_enable = 0;
1044 }
1045 if (wm8994->aif2clk_enable) {
1046 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1047 WM8994_AIF2CLK_ENA_MASK, 0);
1048 wm8994->aif2clk_enable = 0;
1049 }
1050 break;
1051 }
1052
1053 return 0;
1054}
1055
1056static int aif1clk_ev(struct snd_soc_dapm_widget *w,
1057 struct snd_kcontrol *kcontrol, int event)
1058{
1059 struct snd_soc_codec *codec = w->codec;
1060 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1061
1062 switch (event) {
1063 case SND_SOC_DAPM_PRE_PMU:
1064 wm8994->aif1clk_enable = 1;
1065 break;
1066 }
1067
1068 return 0;
1069}
1070
1071static int aif2clk_ev(struct snd_soc_dapm_widget *w,
1072 struct snd_kcontrol *kcontrol, int event)
1073{
1074 struct snd_soc_codec *codec = w->codec;
1075 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1076
1077 switch (event) {
1078 case SND_SOC_DAPM_PRE_PMU:
1079 wm8994->aif2clk_enable = 1;
1080 break;
1081 }
1082
1083 return 0;
1084}
1085
1086static int dac_ev(struct snd_soc_dapm_widget *w,
1087 struct snd_kcontrol *kcontrol, int event)
1088{
1089 struct snd_soc_codec *codec = w->codec;
1090 unsigned int mask = 1 << w->shift;
1091
1092 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
1093 mask, mask);
1094 return 0;
1095}
1096
1007static const char *hp_mux_text[] = { 1097static const char *hp_mux_text[] = {
1008 "Mixer", 1098 "Mixer",
1009 "DAC", 1099 "DAC",
@@ -1272,6 +1362,47 @@ static const struct soc_enum aif2dacr_src_enum =
1272static const struct snd_kcontrol_new aif2dacr_src_mux = 1362static const struct snd_kcontrol_new aif2dacr_src_mux =
1273 SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); 1363 SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
1274 1364
1365static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
1366SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
1367 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
1368SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
1369 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
1370
1371SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1372 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1373SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1374 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1375SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1376 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1377SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1378 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1379
1380SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
1381};
1382
1383static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
1384SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
1385SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0)
1386};
1387
1388static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
1389SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
1390 dac_ev, SND_SOC_DAPM_PRE_PMU),
1391SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
1392 dac_ev, SND_SOC_DAPM_PRE_PMU),
1393SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
1394 dac_ev, SND_SOC_DAPM_PRE_PMU),
1395SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
1396 dac_ev, SND_SOC_DAPM_PRE_PMU),
1397};
1398
1399static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
1400SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
1401SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
1402SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
1403SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
1404};
1405
1275static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { 1406static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
1276SND_SOC_DAPM_INPUT("DMIC1DAT"), 1407SND_SOC_DAPM_INPUT("DMIC1DAT"),
1277SND_SOC_DAPM_INPUT("DMIC2DAT"), 1408SND_SOC_DAPM_INPUT("DMIC2DAT"),
@@ -1284,9 +1415,6 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
1284SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), 1415SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
1285SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), 1416SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
1286 1417
1287SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
1288SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
1289
1290SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, 1418SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
1291 0, WM8994_POWER_MANAGEMENT_4, 9, 0), 1419 0, WM8994_POWER_MANAGEMENT_4, 9, 0),
1292SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, 1420SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
@@ -1372,11 +1500,6 @@ SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
1372SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), 1500SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
1373SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), 1501SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
1374 1502
1375SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
1376SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
1377SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
1378SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
1379
1380SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), 1503SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
1381SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), 1504SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
1382 1505
@@ -1516,14 +1639,12 @@ static const struct snd_soc_dapm_route intercon[] = {
1516 { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, 1639 { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" },
1517 1640
1518 /* DAC1 inputs */ 1641 /* DAC1 inputs */
1519 { "DAC1L", NULL, "DAC1L Mixer" },
1520 { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, 1642 { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" },
1521 { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, 1643 { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
1522 { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, 1644 { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
1523 { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, 1645 { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
1524 { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, 1646 { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
1525 1647
1526 { "DAC1R", NULL, "DAC1R Mixer" },
1527 { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, 1648 { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" },
1528 { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, 1649 { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
1529 { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, 1650 { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1532,7 +1653,6 @@ static const struct snd_soc_dapm_route intercon[] = {
1532 1653
1533 /* DAC2/AIF2 outputs */ 1654 /* DAC2/AIF2 outputs */
1534 { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, 1655 { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" },
1535 { "DAC2L", NULL, "AIF2DAC2L Mixer" },
1536 { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, 1656 { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" },
1537 { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, 1657 { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
1538 { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, 1658 { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
@@ -1540,7 +1660,6 @@ static const struct snd_soc_dapm_route intercon[] = {
1540 { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, 1660 { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" },
1541 1661
1542 { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, 1662 { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" },
1543 { "DAC2R", NULL, "AIF2DAC2R Mixer" },
1544 { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, 1663 { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" },
1545 { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, 1664 { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
1546 { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, 1665 { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1584,6 +1703,24 @@ static const struct snd_soc_dapm_route intercon[] = {
1584 { "Right Headphone Mux", "DAC", "DAC1R" }, 1703 { "Right Headphone Mux", "DAC", "DAC1R" },
1585}; 1704};
1586 1705
1706static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = {
1707 { "DAC1L", NULL, "Late DAC1L Enable PGA" },
1708 { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" },
1709 { "DAC1R", NULL, "Late DAC1R Enable PGA" },
1710 { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" },
1711 { "DAC2L", NULL, "Late DAC2L Enable PGA" },
1712 { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" },
1713 { "DAC2R", NULL, "Late DAC2R Enable PGA" },
1714 { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" }
1715};
1716
1717static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = {
1718 { "DAC1L", NULL, "DAC1L Mixer" },
1719 { "DAC1R", NULL, "DAC1R Mixer" },
1720 { "DAC2L", NULL, "AIF2DAC2L Mixer" },
1721 { "DAC2R", NULL, "AIF2DAC2R Mixer" },
1722};
1723
1587static const struct snd_soc_dapm_route wm8994_revd_intercon[] = { 1724static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
1588 { "AIF1DACDAT", NULL, "AIF2DACDAT" }, 1725 { "AIF1DACDAT", NULL, "AIF2DACDAT" },
1589 { "AIF2DACDAT", NULL, "AIF1DACDAT" }, 1726 { "AIF2DACDAT", NULL, "AIF1DACDAT" },
@@ -2514,6 +2651,22 @@ static int wm8994_resume(struct snd_soc_codec *codec)
2514{ 2651{
2515 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 2652 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
2516 int i, ret; 2653 int i, ret;
2654 unsigned int val, mask;
2655
2656 if (wm8994->revision < 4) {
2657 /* force a HW read */
2658 val = wm8994_reg_read(codec->control_data,
2659 WM8994_POWER_MANAGEMENT_5);
2660
2661 /* modify the cache only */
2662 codec->cache_only = 1;
2663 mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
2664 WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
2665 val &= mask;
2666 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
2667 mask, val);
2668 codec->cache_only = 0;
2669 }
2517 2670
2518 /* Restore the registers */ 2671 /* Restore the registers */
2519 ret = snd_soc_cache_sync(codec); 2672 ret = snd_soc_cache_sync(codec);
@@ -2847,11 +3000,10 @@ static void wm8958_default_micdet(u16 status, void *data)
2847 report |= SND_JACK_BTN_5; 3000 report |= SND_JACK_BTN_5;
2848 3001
2849done: 3002done:
2850 snd_soc_jack_report(wm8994->micdet[0].jack, 3003 snd_soc_jack_report(wm8994->micdet[0].jack, report,
2851 SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | 3004 SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
2852 SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | 3005 SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
2853 SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT, 3006 SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT);
2854 report);
2855} 3007}
2856 3008
2857/** 3009/**
@@ -3125,6 +3277,17 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3125 case WM8994: 3277 case WM8994:
3126 snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, 3278 snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
3127 ARRAY_SIZE(wm8994_specific_dapm_widgets)); 3279 ARRAY_SIZE(wm8994_specific_dapm_widgets));
3280 if (wm8994->revision < 4) {
3281 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
3282 ARRAY_SIZE(wm8994_lateclk_revd_widgets));
3283 snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
3284 ARRAY_SIZE(wm8994_dac_revd_widgets));
3285 } else {
3286 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
3287 ARRAY_SIZE(wm8994_lateclk_widgets));
3288 snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
3289 ARRAY_SIZE(wm8994_dac_widgets));
3290 }
3128 break; 3291 break;
3129 case WM8958: 3292 case WM8958:
3130 snd_soc_add_controls(codec, wm8958_snd_controls, 3293 snd_soc_add_controls(codec, wm8958_snd_controls,
@@ -3143,10 +3306,15 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3143 snd_soc_dapm_add_routes(dapm, wm8994_intercon, 3306 snd_soc_dapm_add_routes(dapm, wm8994_intercon,
3144 ARRAY_SIZE(wm8994_intercon)); 3307 ARRAY_SIZE(wm8994_intercon));
3145 3308
3146 if (wm8994->revision < 4) 3309 if (wm8994->revision < 4) {
3147 snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, 3310 snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
3148 ARRAY_SIZE(wm8994_revd_intercon)); 3311 ARRAY_SIZE(wm8994_revd_intercon));
3149 3312 snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
3313 ARRAY_SIZE(wm8994_lateclk_revd_intercon));
3314 } else {
3315 snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
3316 ARRAY_SIZE(wm8994_lateclk_intercon));
3317 }
3150 break; 3318 break;
3151 case WM8958: 3319 case WM8958:
3152 snd_soc_dapm_add_routes(dapm, wm8958_intercon, 3320 snd_soc_dapm_add_routes(dapm, wm8958_intercon,
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 613df5db0b32..516892706063 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
674}; 674};
675 675
676static const struct snd_soc_dapm_route analogue_routes[] = { 676static const struct snd_soc_dapm_route analogue_routes[] = {
677 { "MICBIAS1", NULL, "CLK_SYS" },
678 { "MICBIAS2", NULL, "CLK_SYS" },
679
677 { "IN1L PGA", "IN1LP Switch", "IN1LP" }, 680 { "IN1L PGA", "IN1LP Switch", "IN1LP" },
678 { "IN1L PGA", "IN1LN Switch", "IN1LN" }, 681 { "IN1L PGA", "IN1LN Switch", "IN1LN" },
679 682
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index e20c9e1457c0..1e9bccae4e80 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
79 .name = "tlv320aic23", 79 .name = "tlv320aic23",
80 .stream_name = "TLV320AIC23", 80 .stream_name = "TLV320AIC23",
81 .codec_dai_name = "tlv320aic23-hifi", 81 .codec_dai_name = "tlv320aic23-hifi",
82 .platform_name = "imx-pcm-audio.0", 82 .platform_name = "imx-fiq-pcm-audio.0",
83 .codec_name = "tlv320aic23-codec.0-001a", 83 .codec_name = "tlv320aic23-codec.0-001a",
84 .cpu_dai_name = "imx-ssi.0", 84 .cpu_dai_name = "imx-ssi.0",
85 .ops = &eukrea_tlv320_snd_ops, 85 .ops = &eukrea_tlv320_snd_ops,
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 28333e7d9c50..dc65650a6fa1 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = {
117 { 117 {
118 .name = "AC97", 118 .name = "AC97",
119 .stream_name = "AC97 HiFi", 119 .stream_name = "AC97 HiFi",
120 .cpu_dai_name = "pxa-ac97.0", 120 .cpu_dai_name = "pxa2xx-ac97",
121 .codec_dai_name = "wm9705-hifi", 121 .codec_dai_name = "wm9705-hifi",
122 .platform_name = "pxa-pcm-audio", 122 .platform_name = "pxa-pcm-audio",
123 .codec_name = "wm9705-codec", 123 .codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = {
126 { 126 {
127 .name = "AC97 Aux", 127 .name = "AC97 Aux",
128 .stream_name = "AC97 Aux", 128 .stream_name = "AC97 Aux",
129 .cpu_dai_name = "pxa-ac97.1", 129 .cpu_dai_name = "pxa2xx-ac97-aux",
130 .codec_dai_name = "wm9705-aux", 130 .codec_dai_name = "wm9705-aux",
131 .platform_name = "pxa-pcm-audio", 131 .platform_name = "pxa-pcm-audio",
132 .codec_name = "wm9705-codec", 132 .codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 01bf31675c55..51897fcd911b 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = {
99 { 99 {
100 .name = "AC97", 100 .name = "AC97",
101 .stream_name = "AC97 HiFi", 101 .stream_name = "AC97 HiFi",
102 .cpu_dai_name = "pxa-ac97.0", 102 .cpu_dai_name = "pxa2xx-ac97",
103 .codec_dai_name = "wm9705-hifi", 103 .codec_dai_name = "wm9705-hifi",
104 .platform_name = "pxa-pcm-audio", 104 .platform_name = "pxa-pcm-audio",
105 .codec_name = "wm9705-codec", 105 .codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = {
109 { 109 {
110 .name = "AC97 Aux", 110 .name = "AC97 Aux",
111 .stream_name = "AC97 Aux", 111 .stream_name = "AC97 Aux",
112 .cpu_dai_name = "pxa-ac97.1", 112 .cpu_dai_name = "pxa2xx-ac97-aux",
113 .codec_dai_name ="wm9705-aux", 113 .codec_dai_name ="wm9705-aux",
114 .platform_name = "pxa-pcm-audio", 114 .platform_name = "pxa-pcm-audio",
115 .codec_name = "wm9705-codec", 115 .codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index c6a37c6ef23b..053ed208e59f 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = {
89 { 89 {
90 .name = "AC97", 90 .name = "AC97",
91 .stream_name = "AC97 HiFi", 91 .stream_name = "AC97 HiFi",
92 .cpu_dai_name = "pxa-ac97.0", 92 .cpu_dai_name = "pxa2xx-ac97",
93 .codec_dai_name = "wm9712-hifi", 93 .codec_dai_name = "wm9712-hifi",
94 .platform_name = "pxa-pcm-audio", 94 .platform_name = "pxa-pcm-audio",
95 .codec_name = "wm9712-codec", 95 .codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = {
98 { 98 {
99 .name = "AC97 Aux", 99 .name = "AC97 Aux",
100 .stream_name = "AC97 Aux", 100 .stream_name = "AC97 Aux",
101 .cpu_dai_name = "pxa-ac97.1", 101 .cpu_dai_name = "pxa2xx-ac97-aux",
102 .codec_dai_name ="wm9712-aux", 102 .codec_dai_name ="wm9712-aux",
103 .platform_name = "pxa-pcm-audio", 103 .platform_name = "pxa-pcm-audio",
104 .codec_name = "wm9712-codec", 104 .codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index fc22e6eefc98..b13a4252812d 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
37 { 37 {
38 .name = "AC97", 38 .name = "AC97",
39 .stream_name = "AC97 HiFi", 39 .stream_name = "AC97 HiFi",
40 .cpu_dai_name = "pxa-ac97.0", 40 .cpu_dai_name = "pxa2xx-ac97",
41 .codec_dai_name = "wm9712-hifi", 41 .codec_dai_name = "wm9712-hifi",
42 .platform_name = "pxa-pcm-audio", 42 .platform_name = "pxa-pcm-audio",
43 .codec_name = "wm9712-codec", 43 .codec_name = "wm9712-codec",
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
45 { 45 {
46 .name = "AC97 Aux", 46 .name = "AC97 Aux",
47 .stream_name = "AC97 Aux", 47 .stream_name = "AC97 Aux",
48 .cpu_dai_name = "pxa-ac97.1", 48 .cpu_dai_name = "pxa2xx-ac97-aux",
49 .codec_dai_name ="wm9712-aux", 49 .codec_dai_name ="wm9712-aux",
50 .platform_name = "pxa-pcm-audio", 50 .platform_name = "pxa-pcm-audio",
51 .codec_name = "wm9712-codec", 51 .codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 0d70fc8c12bd..38ca6759907e 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
162 { 162 {
163 .name = "AC97", 163 .name = "AC97",
164 .stream_name = "AC97 HiFi", 164 .stream_name = "AC97 HiFi",
165 .cpu_dai_name = "pxa-ac97.0", 165 .cpu_dai_name = "pxa2xx-ac97",
166 .codec_dai_name = "wm9713-hifi", 166 .codec_dai_name = "wm9713-hifi",
167 .codec_name = "wm9713-codec", 167 .codec_name = "wm9713-codec",
168 .init = mioa701_wm9713_init, 168 .init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
172 { 172 {
173 .name = "AC97 Aux", 173 .name = "AC97 Aux",
174 .stream_name = "AC97 Aux", 174 .stream_name = "AC97 Aux",
175 .cpu_dai_name = "pxa-ac97.1", 175 .cpu_dai_name = "pxa2xx-ac97-aux",
176 .codec_dai_name ="wm9713-aux", 176 .codec_dai_name ="wm9713-aux",
177 .codec_name = "wm9713-codec", 177 .codec_name = "wm9713-codec",
178 .platform_name = "pxa-pcm-audio", 178 .platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 857db96d4a4f..504e4004f004 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
132{ 132{
133 .name = "AC97 HiFi", 133 .name = "AC97 HiFi",
134 .stream_name = "AC97 HiFi", 134 .stream_name = "AC97 HiFi",
135 .cpu_dai_name = "pxa-ac97.0", 135 .cpu_dai_name = "pxa2xx-ac97",
136 .codec_dai_name = "wm9712-hifi", 136 .codec_dai_name = "wm9712-hifi",
137 .codec_name = "wm9712-codec", 137 .codec_name = "wm9712-codec",
138 .platform_name = "pxa-pcm-audio", 138 .platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
141{ 141{
142 .name = "AC97 Aux", 142 .name = "AC97 Aux",
143 .stream_name = "AC97 Aux", 143 .stream_name = "AC97 Aux",
144 .cpu_dai_name = "pxa-ac97.1", 144 .cpu_dai_name = "pxa2xx-ac97-aux",
145 .codec_dai_name = "wm9712-aux", 145 .codec_dai_name = "wm9712-aux",
146 .codec_name = "wm9712-codec", 146 .codec_name = "wm9712-codec",
147 .platform_name = "pxa-pcm-audio", 147 .platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index f75804ef0897..4b6e5d608b42 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
219{ 219{
220 .name = "AC97", 220 .name = "AC97",
221 .stream_name = "AC97 HiFi", 221 .stream_name = "AC97 HiFi",
222 .cpu_dai_name = "pxa-ac97.0", 222 .cpu_dai_name = "pxa2xx-ac97",
223 .codec_dai_name = "wm9712-hifi", 223 .codec_dai_name = "wm9712-hifi",
224 .platform_name = "pxa-pcm-audio", 224 .platform_name = "pxa-pcm-audio",
225 .codec_name = "wm9712-codec", 225 .codec_name = "wm9712-codec",
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
229{ 229{
230 .name = "AC97 Aux", 230 .name = "AC97 Aux",
231 .stream_name = "AC97 Aux", 231 .stream_name = "AC97 Aux",
232 .cpu_dai_name = "pxa-ac97.1", 232 .cpu_dai_name = "pxa2xx-ac97-aux",
233 .codec_dai_name = "wm9712-aux", 233 .codec_dai_name = "wm9712-aux",
234 .platform_name = "pxa-pcm-audio", 234 .platform_name = "pxa-pcm-audio",
235 .codec_name = "wm9712-codec", 235 .codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b222a7d72027..25bba108fea3 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
166 .stream_name = "AC97 HiFi", 166 .stream_name = "AC97 HiFi",
167 .codec_name = "wm9713-codec", 167 .codec_name = "wm9713-codec",
168 .platform_name = "pxa-pcm-audio", 168 .platform_name = "pxa-pcm-audio",
169 .cpu_dai_name = "pxa-ac97.0", 169 .cpu_dai_name = "pxa2xx-ac97",
170 .codec_name = "wm9713-hifi", 170 .codec_name = "wm9713-hifi",
171 .init = zylonite_wm9713_init, 171 .init = zylonite_wm9713_init,
172}, 172},
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
175 .stream_name = "AC97 Aux", 175 .stream_name = "AC97 Aux",
176 .codec_name = "wm9713-codec", 176 .codec_name = "wm9713-codec",
177 .platform_name = "pxa-pcm-audio", 177 .platform_name = "pxa-pcm-audio",
178 .cpu_dai_name = "pxa-ac97.1", 178 .cpu_dai_name = "pxa2xx-ac97-aux",
179 .codec_name = "wm9713-aux", 179 .codec_name = "wm9713-aux",
180}, 180},
181{ 181{
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 8194f150bab7..25e54230cc6a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
712 !path->connected(path->source, path->sink)) 712 !path->connected(path->source, path->sink))
713 continue; 713 continue;
714 714
715 if (path->sink && path->sink->power_check && 715 if (!path->sink)
716 continue;
717
718 if (path->sink->force) {
719 power = 1;
720 break;
721 }
722
723 if (path->sink->power_check &&
716 path->sink->power_check(path->sink)) { 724 path->sink->power_check(path->sink)) {
717 power = 1; 725 power = 1;
718 break; 726 break;
@@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes);
1627int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) 1635int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
1628{ 1636{
1629 struct snd_soc_dapm_widget *w; 1637 struct snd_soc_dapm_widget *w;
1638 unsigned int val;
1630 1639
1631 list_for_each_entry(w, &dapm->card->widgets, list) 1640 list_for_each_entry(w, &dapm->card->widgets, list)
1632 { 1641 {
@@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
1675 case snd_soc_dapm_post: 1684 case snd_soc_dapm_post:
1676 break; 1685 break;
1677 } 1686 }
1687
1688 /* Read the initial power state from the device */
1689 if (w->reg >= 0) {
1690 val = snd_soc_read(w->codec, w->reg);
1691 val &= 1 << w->shift;
1692 if (w->invert)
1693 val = !val;
1694
1695 if (val)
1696 w->power = 1;
1697 }
1698
1678 w->new = 1; 1699 w->new = 1;
1679 } 1700 }
1680 1701
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 68b97477577b..66eabafb1c24 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
785 } 785 }
786 786
787 dev->pcm->private_data = dev; 787 dev->pcm->private_data = dev;
788 strcpy(dev->pcm->name, dev->product_name); 788 strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
789 789
790 memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); 790 memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
791 memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); 791 memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c
index 2f218c77fff2..a1a47088fd0c 100644
--- a/sound/usb/caiaq/midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device)
136 if (ret < 0) 136 if (ret < 0)
137 return ret; 137 return ret;
138 138
139 strcpy(rmidi->name, device->product_name); 139 strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
140 140
141 rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; 141 rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
142 rmidi->private_data = device; 142 rmidi->private_data = device;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 800f7cb4f251..c0f8270bc199 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
323 return -ENOMEM; 323 return -ENOMEM;
324 } 324 }
325 325
326 mutex_init(&chip->shutdown_mutex);
326 chip->index = idx; 327 chip->index = idx;
327 chip->dev = dev; 328 chip->dev = dev;
328 chip->card = card; 329 chip->card = card;
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
531 chip = ptr; 532 chip = ptr;
532 card = chip->card; 533 card = chip->card;
533 mutex_lock(&register_mutex); 534 mutex_lock(&register_mutex);
535 mutex_lock(&chip->shutdown_mutex);
534 chip->shutdown = 1; 536 chip->shutdown = 1;
535 chip->num_interfaces--; 537 chip->num_interfaces--;
536 if (chip->num_interfaces <= 0) { 538 if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
548 snd_usb_mixer_disconnect(p); 550 snd_usb_mixer_disconnect(p);
549 } 551 }
550 usb_chip[chip->index] = NULL; 552 usb_chip[chip->index] = NULL;
553 mutex_unlock(&chip->shutdown_mutex);
551 mutex_unlock(&register_mutex); 554 mutex_unlock(&register_mutex);
552 snd_card_free_when_closed(card); 555 snd_card_free_when_closed(card);
553 } else { 556 } else {
557 mutex_unlock(&chip->shutdown_mutex);
554 mutex_unlock(&register_mutex); 558 mutex_unlock(&register_mutex);
555 } 559 }
556} 560}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4132522ac90f..e3f680526cb5 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
361 } 361 }
362 362
363 if (changed) { 363 if (changed) {
364 mutex_lock(&subs->stream->chip->shutdown_mutex);
364 /* format changed */ 365 /* format changed */
365 snd_usb_release_substream_urbs(subs, 0); 366 snd_usb_release_substream_urbs(subs, 0);
366 /* influenced: period_bytes, channels, rate, format, */ 367 /* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
368 params_rate(hw_params), 369 params_rate(hw_params),
369 snd_pcm_format_physical_width(params_format(hw_params)) * 370 snd_pcm_format_physical_width(params_format(hw_params)) *
370 params_channels(hw_params)); 371 params_channels(hw_params));
372 mutex_unlock(&subs->stream->chip->shutdown_mutex);
371 } 373 }
372 374
373 return ret; 375 return ret;
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
385 subs->cur_audiofmt = NULL; 387 subs->cur_audiofmt = NULL;
386 subs->cur_rate = 0; 388 subs->cur_rate = 0;
387 subs->period_bytes = 0; 389 subs->period_bytes = 0;
388 if (!subs->stream->chip->shutdown) 390 mutex_lock(&subs->stream->chip->shutdown_mutex);
389 snd_usb_release_substream_urbs(subs, 0); 391 snd_usb_release_substream_urbs(subs, 0);
392 mutex_unlock(&subs->stream->chip->shutdown_mutex);
390 return snd_pcm_lib_free_vmalloc_buffer(substream); 393 return snd_pcm_lib_free_vmalloc_buffer(substream);
391} 394}
392 395
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index db3eb21627ee..6e66fffe87f5 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -36,6 +36,7 @@ struct snd_usb_audio {
36 struct snd_card *card; 36 struct snd_card *card;
37 u32 usb_id; 37 u32 usb_id;
38 int shutdown; 38 int shutdown;
39 struct mutex shutdown_mutex;
39 unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ 40 unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
40 int num_interfaces; 41 int num_interfaces;
41 int num_suspended_intf; 42 int num_suspended_intf;
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 746cf03cb05d..0ace786e83e0 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
264 c->start_time = start; 264 c->start_time = start;
265 if (p->start_time == 0 || p->start_time > start) 265 if (p->start_time == 0 || p->start_time > start)
266 p->start_time = start; 266 p->start_time = start;
267
268 if (cpu > numcpus)
269 numcpus = cpu;
270} 267}
271 268
272#define MAX_CPUS 4096 269#define MAX_CPUS 4096
@@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used,
511 if (!event_str) 508 if (!event_str)
512 return 0; 509 return 0;
513 510
511 if (sample->cpu > numcpus)
512 numcpus = sample->cpu;
513
514 if (strcmp(event_str, "power:cpu_idle") == 0) { 514 if (strcmp(event_str, "power:cpu_idle") == 0) {
515 struct power_processor_entry *ppe = (void *)te; 515 struct power_processor_entry *ppe = (void *)te;
516 if (ppe->state == (u32)PWR_EVENT_EXIT) 516 if (ppe->state == (u32)PWR_EVENT_EXIT)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 32f4f1f2f6e4..df51560f16f7 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
585{ 585{
586 struct sort_entry *se; 586 struct sort_entry *se;
587 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; 587 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
588 u64 nr_events;
588 const char *sep = symbol_conf.field_sep; 589 const char *sep = symbol_conf.field_sep;
589 int ret; 590 int ret;
590 591
@@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
593 594
594 if (pair_hists) { 595 if (pair_hists) {
595 period = self->pair ? self->pair->period : 0; 596 period = self->pair ? self->pair->period : 0;
597 nr_events = self->pair ? self->pair->nr_events : 0;
596 total = pair_hists->stats.total_period; 598 total = pair_hists->stats.total_period;
597 period_sys = self->pair ? self->pair->period_sys : 0; 599 period_sys = self->pair ? self->pair->period_sys : 0;
598 period_us = self->pair ? self->pair->period_us : 0; 600 period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
600 period_guest_us = self->pair ? self->pair->period_guest_us : 0; 602 period_guest_us = self->pair ? self->pair->period_guest_us : 0;
601 } else { 603 } else {
602 period = self->period; 604 period = self->period;
605 nr_events = self->nr_events;
603 total = session_total; 606 total = session_total;
604 period_sys = self->period_sys; 607 period_sys = self->period_sys;
605 period_us = self->period_us; 608 period_us = self->period_us;
@@ -640,9 +643,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
640 643
641 if (symbol_conf.show_nr_samples) { 644 if (symbol_conf.show_nr_samples) {
642 if (sep) 645 if (sep)
643 ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); 646 ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
644 else 647 else
645 ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); 648 ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
646 } 649 }
647 650
648 if (pair_hists) { 651 if (pair_hists) {
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index fb737fe9be91..96c866045d60 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -456,9 +456,9 @@ void svg_legenda(void)
456 return; 456 return;
457 457
458 svg_legenda_box(0, "Running", "sample"); 458 svg_legenda_box(0, "Running", "sample");
459 svg_legenda_box(100, "Idle","rect.c1"); 459 svg_legenda_box(100, "Idle","c1");
460 svg_legenda_box(200, "Deeper Idle", "rect.c3"); 460 svg_legenda_box(200, "Deeper Idle", "c3");
461 svg_legenda_box(350, "Deepest Idle", "rect.c6"); 461 svg_legenda_box(350, "Deepest Idle", "c6");
462 svg_legenda_box(550, "Sleeping", "process2"); 462 svg_legenda_box(550, "Sleeping", "process2");
463 svg_legenda_box(650, "Waiting for cpu", "waiting"); 463 svg_legenda_box(650, "Waiting for cpu", "waiting");
464 svg_legenda_box(800, "Blocked on IO", "blocked"); 464 svg_legenda_box(800, "Blocked on IO", "blocked");